Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/acpi/acpi_pad.c37
-rw-r--r--drivers/acpi/bus.c7
-rw-r--r--drivers/acpi/dock.c1
-rw-r--r--drivers/acpi/ec.c126
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/power_meter.c4
-rw-r--r--drivers/acpi/processor_idle.c72
-rw-r--r--drivers/acpi/processor_pdc.c46
-rw-r--r--drivers/acpi/processor_thermal.c3
-rw-r--r--drivers/acpi/sbs.c3
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/acpi/scan.c27
-rw-r--r--drivers/acpi/sleep.c29
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/video.c51
-rw-r--r--drivers/ata/ahci.c15
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libata-core.c38
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata-sff.c3
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/devtmpfs.c3
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/power/main.c9
-rw-r--r--drivers/block/DAC960.c2
-rw-r--r--drivers/block/aoe/aoecmd.c17
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/drbd/Kconfig2
-rw-r--r--drivers/block/drbd/drbd_int.h9
-rw-r--r--drivers/block/drbd/drbd_main.c8
-rw-r--r--drivers/block/drbd/drbd_nl.c19
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c49
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/bluetooth/Kconfig13
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/ath3k.c187
-rw-r--r--drivers/bluetooth/bluecard_cs.c4
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c1
-rw-r--r--drivers/bluetooth/btuart_cs.c4
-rw-r--r--drivers/bluetooth/dtl1_cs.c4
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/amd64-agp.c20
-rw-r--r--drivers/char/agp/backend.c13
-rw-r--r--drivers/char/agp/hp-agp.c6
-rw-r--r--drivers/char/agp/intel-agp.c6
-rw-r--r--drivers/char/hw_random/core.c5
-rw-r--r--drivers/char/hw_random/virtio-rng.c6
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c4
-rw-r--r--drivers/char/mem.c30
-rw-r--r--drivers/char/nozomi.c2
-rw-r--r--drivers/char/random.c9
-rw-r--r--drivers/char/sonypi.c11
-rw-r--r--drivers/char/toshiba.c12
-rw-r--r--drivers/char/tpm/tpm_infineon.c79
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/char/uv_mmtimer.c18
-rw-r--r--drivers/connector/connector.c175
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c3
-rw-r--r--drivers/cpuidle/governors/menu.c12
-rw-r--r--drivers/crypto/padlock-sha.c23
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/ioat/dma.c2
-rw-r--r--drivers/dma/ioat/dma.h18
-rw-r--r--drivers/dma/ioat/dma_v2.c69
-rw-r--r--drivers/dma/ioat/dma_v2.h2
-rw-r--r--drivers/dma/ioat/dma_v3.c60
-rw-r--r--drivers/dma/ioat/registers.h1
-rw-r--r--drivers/dma/ipu/ipu_idmac.c25
-rw-r--r--drivers/dma/shdma.c324
-rw-r--r--drivers/dma/shdma.h9
-rw-r--r--drivers/edac/amd64_edac.c17
-rw-r--r--drivers/edac/i5000_edac.c8
-rw-r--r--drivers/edac/mpc85xx_edac.c8
-rw-r--r--drivers/firewire/Kconfig44
-rw-r--r--drivers/firewire/core-card.c41
-rw-r--r--drivers/firewire/core-cdev.c39
-rw-r--r--drivers/firewire/core-transaction.c118
-rw-r--r--drivers/firewire/net.c53
-rw-r--r--drivers/firewire/ohci.c21
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/adp5588-gpio.c266
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/ati_pcigart.c10
-rw-r--r--drivers/gpu/drm/drm_bufs.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c29
-rw-r--r--drivers/gpu/drm/drm_edid.c64
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c13
-rw-r--r--drivers/gpu/drm/drm_gem.c13
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/drm_mm.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c90
-rw-r--r--drivers/gpu/drm/drm_pci.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c33
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c36
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c294
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h139
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c390
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c46
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c132
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h14
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c12
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c35
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h40
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c732
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c81
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c55
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c116
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c94
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c205
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c245
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c108
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h77
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c59
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c102
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c77
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c30
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c35
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c34
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c159
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c117
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c61
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c53
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c116
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c17
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c17
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c14
-rw-r--r--drivers/gpu/drm/radeon/Kconfig12
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h801
-rw-r--r--drivers/gpu/drm/radeon/atom.c109
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c259
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c39
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c42
-rw-r--r--drivers/gpu/drm/radeon/r200.c7
-rw-r--r--drivers/gpu/drm/radeon/r300.c33
-rw-r--r--drivers/gpu/drm/radeon/r420.c48
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c207
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c28
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c83
-rw-r--r--drivers/gpu/drm/radeon/r600d.h25
-rw-r--r--drivers/gpu/drm/radeon/radeon.h38
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h23
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c52
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c68
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c179
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c77
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h56
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c105
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c5
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r2002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r420795
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs60068
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5156
-rw-r--r--drivers/gpu/drm/radeon/rs400.c30
-rw-r--r--drivers/gpu/drm/radeon/rs600.c14
-rw-r--r--drivers/gpu/drm/radeon/rs690.c4
-rw-r--r--drivers/gpu/drm/radeon/rv515.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770.c79
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c75
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c125
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c37
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c64
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/hid-apple.c7
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-samsung.c25
-rw-r--r--drivers/hid/hid-wacom.c4
-rw-r--r--drivers/hwmon/Kconfig12
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adt7462.c4
-rw-r--r--drivers/hwmon/amc6821.c1115
-rw-r--r--drivers/hwmon/asus_atk0110.c304
-rw-r--r--drivers/hwmon/coretemp.c16
-rw-r--r--drivers/hwmon/fschmd.c7
-rw-r--r--drivers/hwmon/k10temp.c40
-rw-r--r--drivers/hwmon/k8temp.c2
-rw-r--r--drivers/hwmon/lm78.c25
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/via686a.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/hwmon/w83781d.c26
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c26
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c4
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c4
-rw-r--r--drivers/i2c/busses/i2c-piix4.c4
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c12
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/i2c-core.c7
-rw-r--r--drivers/ieee1394/Kconfig59
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c4
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c6
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/ff-memless.c48
-rw-r--r--drivers/input/input.c87
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c29
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c29
-rw-r--r--drivers/input/joystick/iforce/iforce.h2
-rw-r--r--drivers/input/joystick/xpad.c4
-rw-r--r--drivers/input/keyboard/atkbd.c74
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c8
-rw-r--r--drivers/input/keyboard/locomokbd.c32
-rw-r--r--drivers/input/keyboard/matrix_keypad.c29
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c11
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c14
-rw-r--r--drivers/input/misc/winbond-cir.c2
-rw-r--r--drivers/input/misc/wistron_btns.c2
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/bcm5974.c44
-rw-r--r--drivers/input/mouse/hgpk.c1
-rw-r--r--drivers/input/mouse/lifebook.c8
-rw-r--r--drivers/input/mouse/psmouse-base.c83
-rw-r--r--drivers/input/mouse/sentelic.c6
-rw-r--r--drivers/input/mouse/synaptics.c10
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/i8042.c8
-rw-r--r--drivers/input/serio/serio.c11
-rw-r--r--drivers/input/touchscreen/ad7879.c197
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/lguest/segments.c4
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/macintosh/therm_pm72.c2
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/md/dm-log-userspace-transfer.c10
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-region-hash.c5
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-sysfs.c8
-rw-r--r--drivers/md/dm-table.c20
-rw-r--r--drivers/md/dm.c21
-rw-r--r--drivers/md/md.c56
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/IR/ir-keytable.c2
-rw-r--r--drivers/media/common/saa7146_video.c4
-rw-r--r--drivers/media/common/tuners/tda8290.c12
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/Makefile14
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c20
-rw-r--r--drivers/media/dvb/firewire/firedtv-fw.c12
-rw-r--r--drivers/media/dvb/frontends/Kconfig19
-rw-r--r--drivers/media/dvb/frontends/Makefile2
-rw-r--r--drivers/media/dvb/frontends/dib8000.h2
-rw-r--r--drivers/media/dvb/frontends/lgdt3305.h6
-rw-r--r--drivers/media/dvb/frontends/mb86a16.c1878
-rw-r--r--drivers/media/dvb/frontends/mb86a16.h52
-rw-r--r--drivers/media/dvb/frontends/mb86a16_priv.h151
-rw-r--r--drivers/media/dvb/frontends/tda10021.c4
-rw-r--r--drivers/media/dvb/frontends/tda665x.c257
-rw-r--r--drivers/media/dvb/frontends/tda665x.h52
-rw-r--r--drivers/media/dvb/mantis/Kconfig32
-rw-r--r--drivers/media/dvb/mantis/Makefile28
-rw-r--r--drivers/media/dvb/mantis/hopper_cards.c275
-rw-r--r--drivers/media/dvb/mantis/hopper_vp3028.c88
-rw-r--r--drivers/media/dvb/mantis/hopper_vp3028.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_ca.c207
-rw-r--r--drivers/media/dvb/mantis/mantis_ca.h27
-rw-r--r--drivers/media/dvb/mantis/mantis_cards.c305
-rw-r--r--drivers/media/dvb/mantis/mantis_common.h179
-rw-r--r--drivers/media/dvb/mantis/mantis_core.c238
-rw-r--r--drivers/media/dvb/mantis/mantis_core.h57
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.c256
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_dvb.c296
-rw-r--r--drivers/media/dvb/mantis/mantis_dvb.h35
-rw-r--r--drivers/media/dvb/mantis/mantis_evm.c117
-rw-r--r--drivers/media/dvb/mantis/mantis_hif.c240
-rw-r--r--drivers/media/dvb/mantis/mantis_hif.h29
-rw-r--r--drivers/media/dvb/mantis/mantis_i2c.c267
-rw-r--r--drivers/media/dvb/mantis/mantis_i2c.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_input.c148
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.c130
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.h51
-rw-r--r--drivers/media/dvb/mantis/mantis_link.h83
-rw-r--r--drivers/media/dvb/mantis/mantis_pci.c177
-rw-r--r--drivers/media/dvb/mantis/mantis_pci.h27
-rw-r--r--drivers/media/dvb/mantis/mantis_pcmcia.c120
-rw-r--r--drivers/media/dvb/mantis/mantis_reg.h197
-rw-r--r--drivers/media/dvb/mantis/mantis_uart.c186
-rw-r--r--drivers/media/dvb/mantis/mantis_uart.h58
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1033.c212
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1033.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1034.c119
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1034.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c358
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2033.c187
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2033.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2040.c186
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2040.h32
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3028.c38
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3028.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3030.c105
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3030.h30
-rw-r--r--drivers/media/video/gspca/gspca.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c6
-rw-r--r--drivers/media/video/gspca/ov534.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h1
-rw-r--r--drivers/media/video/gspca/sunplus.c26
-rw-r--r--drivers/media/video/gspca/vc032x.c4
-rw-r--r--drivers/media/video/mx1_camera.c2
-rw-r--r--drivers/media/video/rj54n1cb0c.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c13
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c8
-rw-r--r--drivers/media/video/saa7134/saa7134-ts.c13
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/video/uvc/uvc_queue.c13
-rw-r--r--drivers/media/video/uvc/uvc_video.c45
-rw-r--r--drivers/media/video/uvc/uvcvideo.h5
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/i2o/i2o_config.c13
-rw-r--r--drivers/mfd/Makefile8
-rw-r--r--drivers/mfd/asic3.c40
-rw-r--r--drivers/mfd/mc13783-core.c4
-rw-r--r--drivers/mfd/t7l66xb.c55
-rw-r--r--drivers/mfd/tc6387xb.c119
-rw-r--r--drivers/mfd/tc6393xb.c56
-rw-r--r--drivers/mfd/tmio_core.c52
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/mfd/wm8350-core.c3
-rw-r--r--drivers/mfd/wm8350-irq.c4
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/card/mmc_test.c9
-rw-r--r--drivers/mmc/card/queue.c18
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.c59
-rw-r--r--drivers/mmc/host/tmio_mmc.h46
-rw-r--r--drivers/mtd/maps/Kconfig17
-rw-r--r--drivers/mtd/maps/pismo.c320
-rw-r--r--drivers/mtd/mtdoops.c2
-rw-r--r--drivers/mtd/tests/mtd_readtest.c6
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c7
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c6
-rw-r--r--drivers/mtd/ubi/cdev.c1
-rw-r--r--drivers/mtd/ubi/kapi.c15
-rw-r--r--drivers/mtd/ubi/upd.c1
-rw-r--r--drivers/mtd/ubi/vtbl.c1
-rw-r--r--drivers/net/3c507.c4
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arm/Kconfig1
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atlx/atl2.c7
-rw-r--r--drivers/net/ax88796.c2
-rw-r--r--drivers/net/benet/be.h6
-rw-r--r--drivers/net/benet/be_cmds.c39
-rw-r--r--drivers/net/benet/be_cmds.h19
-rw-r--r--drivers/net/benet/be_ethtool.c77
-rw-r--r--drivers/net/benet/be_main.c35
-rw-r--r--drivers/net/bfin_mac.c5
-rw-r--r--drivers/net/bnx2x_main.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/davinci_emac.c4
-rw-r--r--drivers/net/e1000/e1000.h2
-rw-r--r--drivers/net/e1000/e1000_main.c43
-rw-r--r--drivers/net/e1000e/82571.c2
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c78
-rw-r--r--drivers/net/e1000e/lib.c54
-rw-r--r--drivers/net/e1000e/netdev.c87
-rw-r--r--drivers/net/e1000e/phy.c85
-rw-r--r--drivers/net/fsl_pq_mdio.c30
-rw-r--r--drivers/net/gianfar.c13
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/ibmlana.c3
-rw-r--r--drivers/net/igb/e1000_82575.c4
-rw-r--r--drivers/net/igb/e1000_phy.c9
-rw-r--r--drivers/net/igb/igb_ethtool.c2
-rw-r--r--drivers/net/igb/igb_main.c33
-rw-r--r--drivers/net/igbvf/netdev.c18
-rw-r--r--drivers/net/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c24
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c18
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c45
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ks8851_mll.c4
-rw-r--r--drivers/net/ll_temac_main.c2
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mv643xx_eth.c6
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c193
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/netxen/netxen_nic_main.c14
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c4
-rw-r--r--drivers/net/pcnet32.c3
-rw-r--r--drivers/net/phy/broadcom.c4
-rw-r--r--drivers/net/phy/mdio_bus.c72
-rw-r--r--drivers/net/phy/phy.c4
-rw-r--r--drivers/net/phy/phy_device.c31
-rw-r--r--drivers/net/qlge/qlge_main.c15
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sfc/efx.c7
-rw-r--r--drivers/net/sfc/falcon.c1
-rw-r--r--drivers/net/sfc/falcon_xmac.c38
-rw-r--r--drivers/net/sfc/mcdi.c12
-rw-r--r--drivers/net/sfc/mcdi.h1
-rw-r--r--drivers/net/sfc/mcdi_pcol.h4
-rw-r--r--drivers/net/sfc/mcdi_phy.c93
-rw-r--r--drivers/net/sfc/mtd.c5
-rw-r--r--drivers/net/sfc/net_driver.h1
-rw-r--r--drivers/net/sfc/nic.c2
-rw-r--r--drivers/net/sfc/qt202x_phy.c246
-rw-r--r--drivers/net/sfc/selftest.c8
-rw-r--r--drivers/net/sfc/siena.c1
-rw-r--r--drivers/net/sfc/tenxpress.c138
-rw-r--r--drivers/net/sfc/tx.c4
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sky2.c54
-rw-r--r--drivers/net/starfire.c5
-rw-r--r--drivers/net/tg3.c27
-rw-r--r--drivers/net/tg3.h3
-rw-r--r--drivers/net/tulip/Kconfig4
-rw-r--r--drivers/net/tulip/dmfe.c21
-rw-r--r--drivers/net/tulip/tulip_core.c33
-rw-r--r--drivers/net/tun.c6
-rw-r--r--drivers/net/ucc_geth.c48
-rw-r--r--drivers/net/ucc_geth.h13
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/hso.c105
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/via-rhine.c41
-rw-r--r--drivers/net/via-velocity.c49
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/vxge/vxge-main.c8
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h2
-rw-r--r--drivers/net/wimax/i2400m/usb.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c18
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c46
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/dma.c197
-rw-r--r--drivers/net/wireless/b43/dma.h7
-rw-r--r--drivers/net/wireless/b43/main.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c68
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c4
-rw-r--r--drivers/net/wireless/libertas/mesh.c4
-rw-r--r--drivers/net/wireless/libertas/scan.c22
-rw-r--r--drivers/net/wireless/libertas/wext.c2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c1
-rw-r--r--drivers/net/wireless/mwl8k.c4
-rw-r--r--drivers/net/wireless/orinoco/wext.c6
-rw-r--r--drivers/net/wireless/p54/p54pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c140
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c16
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c6
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/intel-iommu.c6
-rw-r--r--drivers/pci/intr_remapping.c2
-rw-r--r--drivers/pci/pci-acpi.c10
-rw-r--r--drivers/pci/pci-sysfs.c6
-rw-r--r--drivers/pci/pci.c34
-rw-r--r--drivers/pci/pci.h8
-rw-r--r--drivers/pci/pcie/aer/Kconfig.debug4
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c34
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c6
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c4
-rw-r--r--drivers/pci/pcie/aspm.c4
-rw-r--r--drivers/pci/pcie/portdrv_core.c16
-rw-r--r--drivers/pci/pcie/portdrv_pci.c19
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/quirks.c91
-rw-r--r--drivers/pci/search.c6
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c25
-rw-r--r--drivers/platform/x86/Kconfig29
-rw-r--r--drivers/platform/x86/dell-wmi.c18
-rw-r--r--drivers/platform/x86/eeepc-laptop.c298
-rw-r--r--drivers/platform/x86/hp-wmi.c11
-rw-r--r--drivers/platform/x86/msi-wmi.c9
-rw-r--r--drivers/platform/x86/sony-laptop.c9
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c61
-rw-r--r--drivers/platform/x86/wmi.c36
-rw-r--r--drivers/power/pmu_battery.c2
-rw-r--r--drivers/power/wm97xx_battery.c10
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c4
-rw-r--r--drivers/rtc/rtc-cmos.c9
-rw-r--r--drivers/rtc/rtc-fm3130.c6
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/block/dasd_eckd.c16
-rw-r--r--drivers/s390/block/dasd_ioctl.c21
-rw-r--r--drivers/s390/block/dasd_proc.c7
-rw-r--r--drivers/s390/char/con3215.c17
-rw-r--r--drivers/s390/char/fs3270.c17
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape_block.c39
-rw-r--r--drivers/s390/char/tape_char.c18
-rw-r--r--drivers/s390/char/vmcp.c12
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chsc_sch.c23
-rw-r--r--drivers/s390/cio/qdio.h36
-rw-r--r--drivers/s390/cio/qdio_debug.c114
-rw-r--r--drivers/s390/cio/qdio_main.c75
-rw-r--r--drivers/s390/cio/qdio_perf.c149
-rw-r--r--drivers/s390/cio/qdio_perf.h62
-rw-r--r--drivers/s390/cio/qdio_thinint.c8
-rw-r--r--drivers/s390/crypto/zcrypt_api.c4
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c9
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_fc.c93
-rw-r--r--drivers/s390/scsi/zfcp_fc.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c19
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c1
-rw-r--r--drivers/sbus/char/bbc_envctrl.c64
-rw-r--r--drivers/scsi/aacraid/aachba.c52
-rw-r--r--drivers/scsi/aacraid/aacraid.h5
-rw-r--r--drivers/scsi/aacraid/commctrl.c28
-rw-r--r--drivers/scsi/aacraid/comminit.c6
-rw-r--r--drivers/scsi/aacraid/commsup.c72
-rw-r--r--drivers/scsi/aacraid/dpcsup.c36
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c53
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c4
-rw-r--r--[-rwxr-xr-x]drivers/scsi/lpfc/lpfc_hbadisc.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c48
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c2
-rw-r--r--drivers/scsi/pmcraid.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c32
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c34
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c102
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c5
-rw-r--r--drivers/scsi/stex.c5
-rw-r--r--drivers/serial/21285.c4
-rw-r--r--drivers/serial/8250.c7
-rw-r--r--drivers/serial/8250_pnp.c12
-rw-r--r--drivers/serial/Kconfig15
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/imx.c2
-rw-r--r--drivers/serial/pmac_zilog.c11
-rw-r--r--drivers/serial/s5pv210.c154
-rw-r--r--drivers/serial/samsung.c6
-rw-r--r--drivers/serial/samsung.h19
-rw-r--r--drivers/serial/serial_core.c105
-rw-r--r--drivers/serial/serial_cs.c19
-rw-r--r--drivers/serial/sh-sci.c13
-rw-r--r--drivers/serial/uartlite.c2
-rw-r--r--drivers/spi/spi_sh_msiof.c15
-rw-r--r--drivers/ssb/main.c3
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/asus_oled/asus_oled.c12
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c4
-rw-r--r--drivers/staging/et131x/et1310_address_map.h18
-rw-r--r--drivers/staging/et131x/et1310_rx.c6
-rw-r--r--drivers/staging/hv/Hv.c50
-rw-r--r--drivers/staging/hv/Hv.h6
-rw-r--r--drivers/staging/hv/Vmbus.c12
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c48
-rw-r--r--drivers/usb/core/hcd.c18
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/message.c8
-rw-r--r--drivers/usb/core/sysfs.c6
-rw-r--r--drivers/usb/gadget/f_eem.c3
-rw-r--r--drivers/usb/gadget/multi.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c4
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c5
-rw-r--r--drivers/usb/host/ehci-hub.c33
-rw-r--r--drivers/usb/host/ehci-q.c11
-rw-r--r--drivers/usb/host/fhci-hcd.c3
-rw-r--r--drivers/usb/host/fhci-tds.c6
-rw-r--r--drivers/usb/host/isp1362-hcd.c25
-rw-r--r--drivers/usb/host/isp1760-hcd.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c58
-rw-r--r--drivers/usb/host/uhci-hcd.c15
-rw-r--r--drivers/usb/host/uhci-hub.c2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/otg/Kconfig1
-rw-r--r--drivers/usb/serial/ftdi_sio.c25
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h18
-rw-r--r--drivers/usb/serial/generic.c10
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/video/backlight/omap1_bl.c2
-rw-r--r--drivers/video/imxfb.c6
-rw-r--r--drivers/video/mx3fb.c12
-rw-r--r--drivers/video/omap/dispc.c18
-rw-r--r--drivers/video/omap/lcd_htcherald.c4
-rw-r--r--drivers/video/omap/omapfb.h2
-rw-r--r--drivers/video/omap/omapfb_main.c25
-rw-r--r--drivers/video/omap/rfbi.c4
-rw-r--r--drivers/video/omap2/dss/Kconfig7
-rw-r--r--drivers/video/omap2/dss/core.c10
-rw-r--r--drivers/video/omap2/dss/dispc.c74
-rw-r--r--drivers/video/omap2/dss/dsi.c159
-rw-r--r--drivers/video/omap2/dss/dss.c6
-rw-r--r--drivers/video/omap2/dss/dss.h14
-rw-r--r--drivers/video/omap2/dss/rfbi.c30
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c6
-rw-r--r--drivers/video/pxafb.c4
-rw-r--r--drivers/video/s3c-fb.c14
-rw-r--r--drivers/video/via/accel.c5
-rw-r--r--drivers/video/via/viafbdev.c11
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/watchdog/Kconfig4
-rw-r--r--drivers/watchdog/iTCO_wdt.c45
-rw-r--r--drivers/watchdog/ixp2000_wdt.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c11
-rw-r--r--drivers/xen/manage.c8
778 files changed, 22360 insertions(+), 7284 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8a07363417ed..368ae6d3a096 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -28,7 +28,7 @@ source "drivers/md/Kconfig"
 
 source "drivers/message/fusion/Kconfig"
 
-source "drivers/ieee1394/Kconfig"
+source "drivers/firewire/Kconfig"
 
 source "drivers/message/i2o/Kconfig"
 
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 97991ac6f5fc..7e52295f1ecc 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -208,7 +208,7 @@ static int power_saving_thread(void *data)
		 * the mechanism only works when all CPUs have RT task running,
		 * as if one CPU hasn't RT task, RT task from other CPUs will
		 * borrow CPU time from this CPU and cause RT task use > 95%
-		 * CPU time. To make 'avoid staration' work, takes a nap here.
+		 * CPU time. To make 'avoid starvation' work, takes a nap here.
		 */
		if (do_sleep)
			schedule_timeout_killable(HZ * idle_pct / 100);
@@ -222,14 +222,18 @@ static struct task_struct *ps_tsks[NR_CPUS];
 static unsigned int ps_tsk_num;
 static int create_power_saving_task(void)
 {
+	int rc = -ENOMEM;
+
	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"power_saving/%d", ps_tsk_num);
-	if (ps_tsks[ps_tsk_num]) {
+	rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
+	if (!rc)
		ps_tsk_num++;
-		return 0;
-	}
-	return -EINVAL;
+	else
+		ps_tsks[ps_tsk_num] = NULL;
+
+	return rc;
 }
 
 static void destroy_power_saving_task(void)
@@ -237,6 +241,7 @@ static void destroy_power_saving_task(void)
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
+		ps_tsks[ps_tsk_num] = NULL;
	}
 }
 
@@ -253,7 +258,7 @@ static void set_power_saving_task_num(unsigned int num)
	}
 }
 
-static int acpi_pad_idle_cpus(unsigned int num_cpus)
+static void acpi_pad_idle_cpus(unsigned int num_cpus)
 {
	get_online_cpus();
 
@@ -261,7 +266,6 @@ static int acpi_pad_idle_cpus(unsigned int num_cpus)
	set_power_saving_task_num(num_cpus);
 
	put_online_cpus();
-	return 0;
 }
 
 static uint32_t acpi_pad_idle_cpus_num(void)
@@ -369,19 +373,21 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
 static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
 {
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-	acpi_status status;
	union acpi_object *package;
	int rev, num, ret = -EINVAL;
 
-	status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
+		return -EINVAL;
+
+	if (!buffer.length || !buffer.pointer)
		return -EINVAL;
+
	package = buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
		goto out;
	rev = package->package.elements[0].integer.value;
	num = package->package.elements[1].integer.value;
-	if (rev != 1)
+	if (rev != 1 || num < 0)
		goto out;
	*num_cpus = num;
	ret = 0;
@@ -410,7 +416,7 @@ static void acpi_pad_ost(acpi_handle handle, int stat,
 
 static void acpi_pad_handle_notify(acpi_handle handle)
 {
-	int num_cpus, ret;
+	int num_cpus;
	uint32_t idle_cpus;
 
	mutex_lock(&isolated_cpus_lock);
@@ -418,12 +424,9 @@ static void acpi_pad_handle_notify(acpi_handle handle)
		mutex_unlock(&isolated_cpus_lock);
		return;
	}
-	ret = acpi_pad_idle_cpus(num_cpus);
+	acpi_pad_idle_cpus(num_cpus);
	idle_cpus = acpi_pad_idle_cpus_num();
-	if (!ret)
-		acpi_pad_ost(handle, 0, idle_cpus);
-	else
-		acpi_pad_ost(handle, 1, 0);
+	acpi_pad_ost(handle, 0, idle_cpus);
	mutex_unlock(&isolated_cpus_lock);
 }
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index cf761b904e4a..a52126e46307 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -490,9 +490,14 @@ static void acpi_bus_osc_support(void)
 
	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
-#ifdef CONFIG_ACPI_PROCESSOR_AGGREGATOR
+#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
+	defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
 #endif
+
+#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+#endif
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
		return;
	if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c1315c47..b2586f57e1f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
	struct platform_device *dd;
 
	id = dock_station_count;
+	memset(&ds, 0, sizeof(ds));
	dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
	if (IS_ERR(dd))
		return PTR_ERR(dd);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fd1801bdee66..d6471bb6852f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -201,14 +201,13 @@ unlock:
	spin_unlock_irqrestore(&ec->curr_lock, flags);
 }
 
-static void acpi_ec_gpe_query(void *ec_cxt);
+static int acpi_ec_sync_query(struct acpi_ec *ec);
 
-static int ec_check_sci(struct acpi_ec *ec, u8 state)
+static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
	if (state & ACPI_EC_FLAG_SCI) {
		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-			return acpi_os_execute(OSL_EC_BURST_HANDLER,
-				acpi_ec_gpe_query, ec);
+			return acpi_ec_sync_query(ec);
	}
	return 0;
 }
@@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 {
	unsigned long tmp;
	int ret = 0;
-	pr_debug(PREFIX "transaction start\n");
-	/* disable GPE during transaction if storm is detected */
-	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
-		acpi_disable_gpe(NULL, ec->gpe);
-	}
	if (EC_FLAGS_MSI)
		udelay(ACPI_EC_MSI_UDELAY);
	/* start transaction */
@@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
	spin_unlock_irqrestore(&ec->curr_lock, tmp);
	ret = ec_poll(ec);
-	pr_debug(PREFIX "transaction end\n");
	spin_lock_irqsave(&ec->curr_lock, tmp);
	ec->curr = NULL;
	spin_unlock_irqrestore(&ec->curr_lock, tmp);
-	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
-		/* check if we received SCI during transaction */
-		ec_check_sci(ec, acpi_ec_read_status(ec));
-		/* it is safe to enable GPE outside of transaction */
-		acpi_enable_gpe(NULL, ec->gpe);
-	} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
-		pr_info(PREFIX "GPE storm detected, "
-			"transactions will use polling mode\n");
-		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
-	}
	return ret;
 }
 
@@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
		status = -ETIME;
		goto end;
	}
+	pr_debug(PREFIX "transaction start\n");
+	/* disable GPE during transaction if storm is detected */
+	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+		acpi_disable_gpe(NULL, ec->gpe);
+	}
+
	status = acpi_ec_transaction_unlocked(ec, t);
+
+	/* check if we received SCI during transaction */
+	ec_check_sci_sync(ec, acpi_ec_read_status(ec));
+	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+		msleep(1);
+		/* it is safe to enable GPE outside of transaction */
+		acpi_enable_gpe(NULL, ec->gpe);
+	} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+		pr_info(PREFIX "GPE storm detected, "
+			"transactions will use polling mode\n");
+		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+	}
+	pr_debug(PREFIX "transaction end\n");
 end:
	if (ec->global_lock)
		acpi_release_global_lock(glk);
@@ -443,7 +445,7 @@ int ec_transaction(u8 command,
 
 EXPORT_SYMBOL(ec_transaction);
 
-static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
 {
	int result;
	u8 d;
@@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
		.wlen = 0, .rlen = 1};
	if (!ec || !data)
		return -EINVAL;
-
	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
-
-	result = acpi_ec_transaction(ec, &t);
+	result = acpi_ec_transaction_unlocked(ec, &t);
	if (result)
		return result;
-
	if (!d)
		return -ENODATA;
-
	*data = d;
	return 0;
 }
@@ -509,43 +507,79 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
-static void acpi_ec_gpe_query(void *ec_cxt)
+static void acpi_ec_run(void *cxt)
 {
-	struct acpi_ec *ec = ec_cxt;
-	u8 value = 0;
-	struct acpi_ec_query_handler *handler, copy;
-
-	if (!ec || acpi_ec_query(ec, &value))
+	struct acpi_ec_query_handler *handler = cxt;
+	if (!handler)
		return;
-	mutex_lock(&ec->lock);
+	pr_debug(PREFIX "start query execution\n");
+	if (handler->func)
+		handler->func(handler->data);
+	else if (handler->handle)
+		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
+	pr_debug(PREFIX "stop query execution\n");
+	kfree(handler);
+}
+
+static int acpi_ec_sync_query(struct acpi_ec *ec)
+{
+	u8 value = 0;
+	int status;
+	struct acpi_ec_query_handler *handler, *copy;
+	if ((status = acpi_ec_query_unlocked(ec, &value)))
+		return status;
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			/* have custom handler for this bit */
-			memcpy(&copy, handler, sizeof(copy));
-			mutex_unlock(&ec->lock);
-			if (copy.func) {
-				copy.func(copy.data);
-			} else if (copy.handle) {
-				acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
-			}
-			return;
+			copy = kmalloc(sizeof(*handler), GFP_KERNEL);
+			if (!copy)
+				return -ENOMEM;
+			memcpy(copy, handler, sizeof(*copy));
+			pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+			return acpi_os_execute((copy->func) ?
+				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
+				acpi_ec_run, copy);
		}
	}
+	return 0;
+}
+
+static void acpi_ec_gpe_query(void *ec_cxt)
+{
+	struct acpi_ec *ec = ec_cxt;
+	if (!ec)
+		return;
+	mutex_lock(&ec->lock);
+	acpi_ec_sync_query(ec);
	mutex_unlock(&ec->lock);
 }
 
+static void acpi_ec_gpe_query(void *ec_cxt);
+
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
+{
+	if (state & ACPI_EC_FLAG_SCI) {
+		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+			pr_debug(PREFIX "push gpe query to the queue\n");
+			return acpi_os_execute(OSL_NOTIFY_HANDLER,
+				acpi_ec_gpe_query, ec);
+		}
+	}
+	return 0;
+}
+
 static u32 acpi_ec_gpe_handler(void *data)
 {
	struct acpi_ec *ec = data;
-	u8 status;
 
	pr_debug(PREFIX "~~~> interrupt\n");
-	status = acpi_ec_read_status(ec);
 
-	advance_transaction(ec, status);
-	if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
+	advance_transaction(ec, acpi_ec_read_status(ec));
+	if (ec_transaction_done(ec) &&
+	    (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
		wake_up(&ec->wait);
-	ec_check_sci(ec, status);
+		ec_check_sci(ec, acpi_ec_read_status(ec));
+	}
	return ACPI_INTERRUPT_HANDLED;
 }
 
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 394ae89409c2..04b0f007c9b7 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("pci_link");
 static int acpi_pci_link_add(struct acpi_device *device);
 static int acpi_pci_link_remove(struct acpi_device *device, int type);
 
-static struct acpi_device_id link_device_ids[] = {
+static const struct acpi_device_id link_device_ids[] = {
	{"PNP0C0F", 0},
	{"", 0},
 };
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 101cce3681d1..64f55b6db73c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -46,7 +46,7 @@ static int acpi_pci_root_add(struct acpi_device *device);
 static int acpi_pci_root_remove(struct acpi_device *device, int type);
 static int acpi_pci_root_start(struct acpi_device *device);
 
-static struct acpi_device_id root_device_ids[] = {
+static const struct acpi_device_id root_device_ids[] = {
	{"PNP0A03", 0},
	{"", 0},
 };
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 22b297916519..0f30c3c1eea4 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -65,7 +65,7 @@ static int acpi_power_remove(struct acpi_device *device, int type);
 static int acpi_power_resume(struct acpi_device *device);
 static int acpi_power_open_fs(struct inode *inode, struct file *file);
 
-static struct acpi_device_id power_device_ids[] = {
+static const struct acpi_device_id power_device_ids[] = {
	{ACPI_POWER_HID, 0},
	{"", 0},
 };
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
index 2ef7030a0c28..dc4ffadf8122 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -64,7 +64,7 @@ static int can_cap_in_hardware(void)
	return force_cap_on || cap_in_hardware;
 }
 
-static struct acpi_device_id power_meter_ids[] = {
+static const struct acpi_device_id power_meter_ids[] = {
	{"ACPI000D", 0},
	{"", 0},
 };
@@ -534,6 +534,7 @@ static void remove_domain_devices(struct acpi_power_meter_resource *resource)
 
	kfree(resource->domain_devices);
	kobject_put(resource->holders_dir);
+	resource->num_domain_devices = 0;
 }
 
 static int read_domain_devices(struct acpi_power_meter_resource *resource)
@@ -740,7 +741,6 @@ skip_unsafe_cap:
 
	return res;
 error:
-	remove_domain_devices(resource);
	remove_attrs(resource);
	return res;
 }
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d1676b1754d9..e88e8ae04fdb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
+	{ set_max_cstate, "Pavilion zv5000", {
+	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+	 (void *)1},
+	{ set_max_cstate, "Asus L8400B", {
+	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+	 (void *)1},
	{},
 };
 
@@ -305,6 +313,28 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
 
+	/*
+	 * FADT specified C2 latency must be less than or equal to
+	 * 100 microseconds.
+	 */
+	if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
+		/* invalidate C2 */
+		pr->power.states[ACPI_STATE_C2].address = 0;
+	}
+
+	/*
+	 * FADT supplied C3 latency must be less than or equal to
+	 * 1000 microseconds.
+	 */
+	if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
+		/* invalidate C3 */
+		pr->power.states[ACPI_STATE_C3].address = 0;
+	}
+
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
@@ -494,33 +524,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
	return status;
 }
 
-static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
-{
-
-	if (!cx->address)
-		return;
-
-	/*
-	 * C2 latency must be less than or equal to 100
-	 * microseconds.
-	 */
-	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "latency too large [%d]\n", cx->latency));
-		return;
-	}
-
-	/*
-	 * Otherwise we've met all of our C2 requirements.
-	 * Normalize the C2 latency to expidite policy
-	 */
-	cx->valid = 1;
-
-	cx->latency_ticks = cx->latency;
-
-	return;
-}
-
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
 {
@@ -532,16 +535,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
		return;
 
	/*
-	 * C3 latency must be less than or equal to 1000
-	 * microseconds.
-	 */
-	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "latency too large [%d]\n", cx->latency));
-		return;
-	}
-
-	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
@@ -629,7 +622,10 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
			break;
 
		case ACPI_STATE_C2:
-			acpi_processor_power_verify_c2(cx);
+			if (!cx->address)
+				break;
+			cx->valid = 1;
+			cx->latency_ticks = cx->latency; /* Normalize latency */
			break;
 
		case ACPI_STATE_C3:
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 30e4dc0cdf30..e306ba9aa34e 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
125 return status; 125 return status;
126} 126}
127 127
128static int early_pdc_done;
129
128void acpi_processor_set_pdc(acpi_handle handle) 130void acpi_processor_set_pdc(acpi_handle handle)
129{ 131{
130 struct acpi_object_list *obj_list; 132 struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
132 if (arch_has_acpi_pdc() == false) 134 if (arch_has_acpi_pdc() == false)
133 return; 135 return;
134 136
137 if (early_pdc_done)
138 return;
139
135 obj_list = acpi_processor_alloc_pdc(); 140 obj_list = acpi_processor_alloc_pdc();
136 if (!obj_list) 141 if (!obj_list)
137 return; 142 return;
@@ -144,6 +149,36 @@ void acpi_processor_set_pdc(acpi_handle handle)
144} 149}
145EXPORT_SYMBOL_GPL(acpi_processor_set_pdc); 150EXPORT_SYMBOL_GPL(acpi_processor_set_pdc);
146 151
152static int early_pdc_optin;
153static int set_early_pdc_optin(const struct dmi_system_id *id)
154{
155 early_pdc_optin = 1;
156 return 0;
157}
158
159static int param_early_pdc_optin(char *s)
160{
161 early_pdc_optin = 1;
162 return 1;
163}
164__setup("acpi_early_pdc_eval", param_early_pdc_optin);
165
166static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
167 {
168 set_early_pdc_optin, "HP Envy", {
169 DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
170 DMI_MATCH(DMI_PRODUCT_NAME, "HP Envy") }, NULL},
171 {
172 set_early_pdc_optin, "HP Pavilion dv6", {
173 DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
174 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6") }, NULL},
175 {
176 set_early_pdc_optin, "HP Pavilion dv7", {
177 DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
178 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7") }, NULL},
179 {},
180};
181
147static acpi_status 182static acpi_status
148early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv) 183early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
149{ 184{
@@ -151,7 +186,7 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
151 return AE_OK; 186 return AE_OK;
152} 187}
153 188
154void acpi_early_processor_set_pdc(void) 189void __init acpi_early_processor_set_pdc(void)
155{ 190{
156 /* 191 /*
157 * Check whether the system is DMI table. If yes, OSPM 192 * Check whether the system is DMI table. If yes, OSPM
@@ -159,7 +194,16 @@ void acpi_early_processor_set_pdc(void)
159 */ 194 */
160 dmi_check_system(processor_idle_dmi_table); 195 dmi_check_system(processor_idle_dmi_table);
161 196
197 /*
198 * Allow systems to opt-in to early _PDC evaluation.
199 */
200 dmi_check_system(early_pdc_optin_table);
201 if (!early_pdc_optin)
202 return;
203
162 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 204 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
163 ACPI_UINT32_MAX, 205 ACPI_UINT32_MAX,
164 early_init_pdc, NULL, NULL, NULL); 206 early_init_pdc, NULL, NULL, NULL);
207
208 early_pdc_done = 1;
165} 209}
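The processor_pdc.c hunk gates early _PDC evaluation behind an opt-in (a DMI match or the acpi_early_pdc_eval boot parameter) and latches early_pdc_done so the later per-processor path does not repeat the work. A minimal userspace sketch of that opt-in/run-once pattern, with simplified stand-ins for the DMI table and __setup() plumbing, might look like:

#include <stdio.h>
#include <string.h>

static int early_pdc_optin;
static int early_pdc_done;

static void parse_cmdline(const char *cmdline)
{
        if (strstr(cmdline, "acpi_early_pdc_eval"))
                early_pdc_optin = 1;     /* stand-in for the __setup() hook */
}

static void dmi_check(const char *product)
{
        static const char *optin_products[] = {
                "HP Envy", "HP Pavilion dv6", "HP Pavilion dv7",
        };
        for (size_t i = 0; i < sizeof(optin_products) / sizeof(*optin_products); i++)
                if (!strcmp(product, optin_products[i]))
                        early_pdc_optin = 1;
}

static void early_set_pdc(void)
{
        if (!early_pdc_optin)
                return;
        printf("evaluating _PDC for all processors early\n");
        early_pdc_done = 1;              /* remember the early pass ran */
}

static void per_cpu_set_pdc(int cpu)
{
        if (early_pdc_done)
                return;                  /* already handled in the early pass */
        printf("evaluating _PDC for CPU %d\n", cpu);
}

int main(void)
{
        parse_cmdline("root=/dev/sda1 acpi_early_pdc_eval");
        dmi_check("HP Envy");
        early_set_pdc();
        per_cpu_set_pdc(0);              /* skipped: early pass already ran */
        return 0;
}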
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 140c5c5b423c..6deafb4aa0da 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -443,8 +443,7 @@ struct thermal_cooling_device_ops processor_cooling_ops = {
443#ifdef CONFIG_ACPI_PROCFS 443#ifdef CONFIG_ACPI_PROCFS
444static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset) 444static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
445{ 445{
446 struct acpi_processor *pr = (struct acpi_processor *)seq->private; 446 struct acpi_processor *pr = seq->private;
447
448 447
449 if (!pr) 448 if (!pr)
450 goto end; 449 goto end;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 52b9db8afc20..b16ddbf23a9c 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -822,7 +822,10 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
822 822
823static void acpi_battery_remove(struct acpi_sbs *sbs, int id) 823static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
824{ 824{
825#if defined(CONFIG_ACPI_SYSFS_POWER) || defined(CONFIG_ACPI_PROCFS_POWER)
825 struct acpi_battery *battery = &sbs->battery[id]; 826 struct acpi_battery *battery = &sbs->battery[id];
827#endif
828
826#ifdef CONFIG_ACPI_SYSFS_POWER 829#ifdef CONFIG_ACPI_SYSFS_POWER
827 if (battery->bat.dev) { 830 if (battery->bat.dev) {
828 if (battery->have_sysfs_alarm) 831 if (battery->have_sysfs_alarm)
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index d9339806df45..fd09229282ea 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -242,7 +242,7 @@ static int smbus_alarm(void *context)
242 case ACPI_SBS_CHARGER: 242 case ACPI_SBS_CHARGER:
243 case ACPI_SBS_MANAGER: 243 case ACPI_SBS_MANAGER:
244 case ACPI_SBS_BATTERY: 244 case ACPI_SBS_BATTERY:
245 acpi_os_execute(OSL_GPE_HANDLER, 245 acpi_os_execute(OSL_NOTIFY_HANDLER,
246 acpi_smbus_callback, hc); 246 acpi_smbus_callback, hc);
247 default:; 247 default:;
248 } 248 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f6226085d..3e009674f333 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
1336 1336
1337 if (child) 1337 if (child)
1338 *child = device; 1338 *child = device;
1339 return 0; 1339
1340 if (device)
1341 return 0;
1342 else
1343 return -ENODEV;
1340} 1344}
1341 1345
1346/*
1347 * acpi_bus_add and acpi_bus_start
1348 *
1349 * scan a given ACPI tree (probably recently hot-plugged) and
1350 * create and add, or start, the devices found.
1351 *
1352 * If no devices were found, -ENODEV is returned. This does not
1353 * mean a real error; there simply were no suitable ACPI objects
1354 * in the table trunk from which the kernel could create a device
1355 * and add/start an appropriate driver.
1356 */
1357
1342int 1358int
1343acpi_bus_add(struct acpi_device **child, 1359acpi_bus_add(struct acpi_device **child,
1344 struct acpi_device *parent, acpi_handle handle, int type) 1360 struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
1348 memset(&ops, 0, sizeof(ops)); 1364 memset(&ops, 0, sizeof(ops));
1349 ops.acpi_op_add = 1; 1365 ops.acpi_op_add = 1;
1350 1366
1351 acpi_bus_scan(handle, &ops, child); 1367 return acpi_bus_scan(handle, &ops, child);
1352 return 0;
1353} 1368}
1354EXPORT_SYMBOL(acpi_bus_add); 1369EXPORT_SYMBOL(acpi_bus_add);
1355 1370
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
1357{ 1372{
1358 struct acpi_bus_ops ops; 1373 struct acpi_bus_ops ops;
1359 1374
1375 if (!device)
1376 return -EINVAL;
1377
1360 memset(&ops, 0, sizeof(ops)); 1378 memset(&ops, 0, sizeof(ops));
1361 ops.acpi_op_start = 1; 1379 ops.acpi_op_start = 1;
1362 1380
1363 acpi_bus_scan(device->handle, &ops, NULL); 1381 return acpi_bus_scan(device->handle, &ops, NULL);
1364 return 0;
1365} 1382}
1366EXPORT_SYMBOL(acpi_bus_start); 1383EXPORT_SYMBOL(acpi_bus_start);
1367 1384
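With the scan.c change, acpi_bus_add() and acpi_bus_start() propagate the result of acpi_bus_scan(), so callers can now see -ENODEV when nothing suitable was found. A hedged sketch of how a caller might treat that value (plain C; bus_add() is a stand-in, not the real API):

#include <stdio.h>
#include <errno.h>

/* pretend scan result: how many devices were created under the handle */
static int bus_add(int devices_found)
{
        return devices_found ? 0 : -ENODEV;
}

int main(void)
{
        int ret = bus_add(0);

        if (ret == -ENODEV)
                printf("nothing to bind under this handle (not a real error)\n");
        else if (ret)
                printf("add failed: %d\n", ret);
        else
                printf("device(s) created and added\n");
        return 0;
}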
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5f2c379ab7bf..79d33d908b5a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -81,6 +81,23 @@ static int acpi_sleep_prepare(u32 acpi_state)
81#ifdef CONFIG_ACPI_SLEEP 81#ifdef CONFIG_ACPI_SLEEP
82static u32 acpi_target_sleep_state = ACPI_STATE_S0; 82static u32 acpi_target_sleep_state = ACPI_STATE_S0;
83/* 83/*
84 * According to the ACPI specification the BIOS should make sure that ACPI is
85 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
86 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
87 * on such systems during resume. Unfortunately that doesn't help in
88 * particularly pathological cases in which SCI_EN has to be set directly on
89 * resume, although the specification states very clearly that this flag is
90 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
91 * cases.
92 */
93static bool set_sci_en_on_resume;
94
95void __init acpi_set_sci_en_on_resume(void)
96{
97 set_sci_en_on_resume = true;
98}
99
100/*
84 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the 101 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
85 * user to request that behavior by using the 'acpi_old_suspend_ordering' 102 * user to request that behavior by using the 'acpi_old_suspend_ordering'
86 * kernel command line option that causes the following variable to be set. 103 * kernel command line option that causes the following variable to be set.
@@ -170,18 +187,6 @@ static void acpi_pm_end(void)
170#endif /* CONFIG_ACPI_SLEEP */ 187#endif /* CONFIG_ACPI_SLEEP */
171 188
172#ifdef CONFIG_SUSPEND 189#ifdef CONFIG_SUSPEND
173/*
174 * According to the ACPI specification the BIOS should make sure that ACPI is
175 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
176 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
177 * on such systems during resume. Unfortunately that doesn't help in
178 * particularly pathological cases in which SCI_EN has to be set directly on
179 * resume, although the specification states very clearly that this flag is
180 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
181 * cases.
182 */
183static bool set_sci_en_on_resume;
184
185extern void do_suspend_lowlevel(void); 190extern void do_suspend_lowlevel(void);
186 191
187static u32 acpi_suspend_states[] = { 192static u32 acpi_suspend_states[] = {
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7c450..8a0ed2800e63 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
213 unsigned long table_end; 213 unsigned long table_end;
214 acpi_size tbl_size; 214 acpi_size tbl_size;
215 215
216 if (acpi_disabled) 216 if (acpi_disabled && !acpi_ht)
217 return -ENODEV; 217 return -ENODEV;
218 218
219 if (!handler) 219 if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
280 struct acpi_table_header *table = NULL; 280 struct acpi_table_header *table = NULL;
281 acpi_size tbl_size; 281 acpi_size tbl_size;
282 282
283 if (acpi_disabled) 283 if (acpi_disabled && !acpi_ht)
284 return -ENODEV; 284 return -ENODEV;
285 285
286 if (!handler) 286 if (!handler)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 05dff631591c..b765790b32be 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -78,6 +78,13 @@ MODULE_LICENSE("GPL");
78static int brightness_switch_enabled = 1; 78static int brightness_switch_enabled = 1;
79module_param(brightness_switch_enabled, bool, 0644); 79module_param(brightness_switch_enabled, bool, 0644);
80 80
81/*
82 * By default, we don't allow duplicate ACPI video bus devices
83 * under the same VGA controller
84 */
85static int allow_duplicates;
86module_param(allow_duplicates, bool, 0644);
87
81static int register_count = 0; 88static int register_count = 0;
82static int acpi_video_bus_add(struct acpi_device *device); 89static int acpi_video_bus_add(struct acpi_device *device);
83static int acpi_video_bus_remove(struct acpi_device *device, int type); 90static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -999,8 +1006,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
999 sprintf(name, "acpi_video%d", count++); 1006 sprintf(name, "acpi_video%d", count++);
1000 device->backlight = backlight_device_register(name, 1007 device->backlight = backlight_device_register(name,
1001 NULL, device, &acpi_backlight_ops); 1008 NULL, device, &acpi_backlight_ops);
1002 device->backlight->props.max_brightness = device->brightness->count-3;
1003 kfree(name); 1009 kfree(name);
1010 if (IS_ERR(device->backlight))
1011 return;
1012 device->backlight->props.max_brightness = device->brightness->count-3;
1004 1013
1005 result = sysfs_create_link(&device->backlight->dev.kobj, 1014 result = sysfs_create_link(&device->backlight->dev.kobj,
1006 &device->dev->dev.kobj, "device"); 1015 &device->dev->dev.kobj, "device");
@@ -1979,6 +1988,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1979 unsigned long long level_current, level_next; 1988 unsigned long long level_current, level_next;
1980 int result = -EINVAL; 1989 int result = -EINVAL;
1981 1990
1991 /* no warning message if acpi_backlight=vendor is used */
1992 if (!acpi_video_backlight_support())
1993 return 0;
1994
1982 if (!device->brightness) 1995 if (!device->brightness)
1983 goto out; 1996 goto out;
1984 1997
@@ -2233,11 +2246,47 @@ static int acpi_video_resume(struct acpi_device *device)
2233 return AE_OK; 2246 return AE_OK;
2234} 2247}
2235 2248
2249static acpi_status
2250acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
2251 void **return_value)
2252{
2253 struct acpi_device *device = context;
2254 struct acpi_device *sibling;
2255 int result;
2256
2257 if (handle == device->handle)
2258 return AE_CTRL_TERMINATE;
2259
2260 result = acpi_bus_get_device(handle, &sibling);
2261 if (result)
2262 return AE_OK;
2263
2264 if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
2265 return AE_ALREADY_EXISTS;
2266
2267 return AE_OK;
2268}
2269
2236static int acpi_video_bus_add(struct acpi_device *device) 2270static int acpi_video_bus_add(struct acpi_device *device)
2237{ 2271{
2238 struct acpi_video_bus *video; 2272 struct acpi_video_bus *video;
2239 struct input_dev *input; 2273 struct input_dev *input;
2240 int error; 2274 int error;
2275 acpi_status status;
2276
2277 status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
2278 device->parent->handle, 1,
2279 acpi_video_bus_match, NULL,
2280 device, NULL);
2281 if (status == AE_ALREADY_EXISTS) {
2282 printk(KERN_WARNING FW_BUG
2283 "Duplicate ACPI video bus devices for the"
2284 " same VGA controller, please try module "
2285 "parameter \"video.allow_duplicates=1\""
2286 "if the current driver doesn't work.\n");
2287 if (!allow_duplicates)
2288 return -ENODEV;
2289 }
2241 2290
2242 video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL); 2291 video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
2243 if (!video) 2292 if (!video)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..b34390347c16 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
2868 }, 2868 },
2869 .driver_data = "F.23", /* cutoff BIOS version */ 2869 .driver_data = "F.23", /* cutoff BIOS version */
2870 }, 2870 },
2871 /*
2872 * Acer eMachines G725 has the same problem. BIOS
2873 * V1.03 is known to be broken. V3.04 is known to
2874 * work. Inbetween, there are V1.06, V2.06 and V3.03
2875 * that we don't have much idea about. For now,
2876 * blacklist anything older than V3.04.
2877 */
2878 {
2879 .ident = "G725",
2880 .matches = {
2881 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
2882 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
2883 },
2884 .driver_data = "V3.04", /* cutoff BIOS version */
2885 },
2871 { } /* terminate list */ 2886 { } /* terminate list */
2872 }; 2887 };
2873 const struct dmi_system_id *dmi = dmi_first_match(sysids); 2888 const struct dmi_system_id *dmi = dmi_first_match(sysids);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 19136a7e1064..6f3f2257d0f0 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -329,7 +329,7 @@ static struct ata_port_operations ich_pata_ops = {
329}; 329};
330 330
331static struct ata_port_operations piix_sata_ops = { 331static struct ata_port_operations piix_sata_ops = {
332 .inherits = &ata_bmdma_port_ops, 332 .inherits = &ata_bmdma32_port_ops,
333}; 333};
334 334
335static struct ata_port_operations piix_sidpr_sata_ops = { 335static struct ata_port_operations piix_sidpr_sata_ops = {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 22ff51bdbc8a..6728328f3bea 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3790,21 +3790,45 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3790int sata_link_resume(struct ata_link *link, const unsigned long *params, 3790int sata_link_resume(struct ata_link *link, const unsigned long *params,
3791 unsigned long deadline) 3791 unsigned long deadline)
3792{ 3792{
3793 int tries = ATA_LINK_RESUME_TRIES;
3793 u32 scontrol, serror; 3794 u32 scontrol, serror;
3794 int rc; 3795 int rc;
3795 3796
3796 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3797 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3797 return rc; 3798 return rc;
3798 3799
3799 scontrol = (scontrol & 0x0f0) | 0x300; 3800 /*
3801 * Writes to SControl sometimes get ignored under certain
3802 * controllers (ata_piix SIDPR). Make sure DET actually is
3803 * cleared.
3804 */
3805 do {
3806 scontrol = (scontrol & 0x0f0) | 0x300;
3807 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3808 return rc;
3809 /*
3810 * Some PHYs react badly if SStatus is pounded
3811 * immediately after resuming. Delay 200ms before
3812 * debouncing.
3813 */
3814 msleep(200);
3800 3815
3801 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3816 /* is SControl restored correctly? */
3802 return rc; 3817 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3818 return rc;
3819 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3803 3820
3804 /* Some PHYs react badly if SStatus is pounded immediately 3821 if ((scontrol & 0xf0f) != 0x300) {
3805 * after resuming. Delay 200ms before debouncing. 3822 ata_link_printk(link, KERN_ERR,
3806 */ 3823 "failed to resume link (SControl %X)\n",
3807 msleep(200); 3824 scontrol);
3825 return 0;
3826 }
3827
3828 if (tries < ATA_LINK_RESUME_TRIES)
3829 ata_link_printk(link, KERN_WARNING,
3830 "link resume succeeded after %d retries\n",
3831 ATA_LINK_RESUME_TRIES - tries);
3808 3832
3809 if ((rc = sata_link_debounce(link, params, deadline))) 3833 if ((rc = sata_link_debounce(link, params, deadline)))
3810 return rc; 3834 return rc;
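The sata_link_resume() change wraps the SControl write in a bounded retry loop because some controllers (the commentary names ata_piix SIDPR) silently drop the write; the code re-reads the register after a 200 ms delay and only gives up after ATA_LINK_RESUME_TRIES attempts. A userspace sketch of the same write-verify-retry pattern, with fake register accessors standing in for the SCR helpers:

#include <stdio.h>

#define LINK_RESUME_TRIES 5

static unsigned int hw_reg;             /* pretend hardware register */
static int flaky_writes = 2;            /* first two writes get dropped */

static unsigned int reg_read(void) { return hw_reg; }

static void reg_write(unsigned int v)
{
        if (flaky_writes-- > 0)
                return;                 /* controller ignored the write */
        hw_reg = v;
}

int main(void)
{
        int tries = LINK_RESUME_TRIES;
        unsigned int scontrol = reg_read();

        do {
                scontrol = (scontrol & 0x0f0) | 0x300;
                reg_write(scontrol);
                /* the real code sleeps 200ms here before re-checking */
                scontrol = reg_read();
        } while ((scontrol & 0xf0f) != 0x300 && --tries);

        if ((scontrol & 0xf0f) != 0x300)
                printf("failed to resume link (SControl %X)\n", scontrol);
        else if (tries < LINK_RESUME_TRIES)
                printf("link resume succeeded after %d retries\n",
                       LINK_RESUME_TRIES - tries);
        else
                printf("link resumed on first attempt\n");
        return 0;
}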
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0ea97c942ced..9f6cfac0f2cc 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2028,8 +2028,9 @@ static void ata_eh_link_autopsy(struct ata_link *link)
2028 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2028 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2029 2029
2030 /* determine whether the command is worth retrying */ 2030 /* determine whether the command is worth retrying */
2031 if (!(qc->err_mask & AC_ERR_INVALID) && 2031 if (qc->flags & ATA_QCFLAG_IO ||
2032 ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) 2032 (!(qc->err_mask & AC_ERR_INVALID) &&
2033 qc->err_mask != AC_ERR_DEV))
2033 qc->flags |= ATA_QCFLAG_RETRY; 2034 qc->flags |= ATA_QCFLAG_RETRY;
2034 2035
2035 /* accumulate error info */ 2036 /* accumulate error info */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2875 * write indication (used for PIO/DMA setup), result TF is 2875 * write indication (used for PIO/DMA setup), result TF is
2876 * copied back and we don't whine too much about its failure. 2876 * copied back and we don't whine too much about its failure.
2877 */ 2877 */
2878 tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2878 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2879 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2879 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2880 tf->flags |= ATA_TFLAG_WRITE; 2880 tf->flags |= ATA_TFLAG_WRITE;
2881 2881
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
893 do_write); 893 do_write);
894 } 894 }
895 895
896 if (!do_write)
897 flush_dcache_page(page);
898
896 qc->curbytes += qc->sect_size; 899 qc->curbytes += qc->sect_size;
897 qc->cursg_ofs += qc->sect_size; 900 qc->cursg_ofs += qc->sect_size;
898 901
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 07d8d00b4d34..63306285c843 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -862,7 +862,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
862 if (port_status & PDC_DRIVE_ERR) 862 if (port_status & PDC_DRIVE_ERR)
863 ac_err_mask |= AC_ERR_DEV; 863 ac_err_mask |= AC_ERR_DEV;
864 if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR)) 864 if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
865 ac_err_mask |= AC_ERR_HSM; 865 ac_err_mask |= AC_ERR_OTHER;
866 if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR)) 866 if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
867 ac_err_mask |= AC_ERR_ATA_BUS; 867 ac_err_mask |= AC_ERR_ATA_BUS;
868 if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR 868 if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..6e2c3b064f53 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
59 else 59 else
60 pr_debug("class '%s' does not have a release() function, " 60 pr_debug("class '%s' does not have a release() function, "
61 "be careful\n", class->name); 61 "be careful\n", class->name);
62
63 kfree(cp);
62} 64}
63 65
64static struct sysfs_ops class_sysfs_ops = { 66static struct sysfs_ops class_sysfs_ops = {
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 090dd4851301..42ae452b36b0 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -354,6 +354,7 @@ int __init devtmpfs_init(void)
354{ 354{
355 int err; 355 int err;
356 struct vfsmount *mnt; 356 struct vfsmount *mnt;
357 char options[] = "mode=0755";
357 358
358 err = register_filesystem(&dev_fs_type); 359 err = register_filesystem(&dev_fs_type);
359 if (err) { 360 if (err) {
@@ -362,7 +363,7 @@ int __init devtmpfs_init(void)
362 return err; 363 return err;
363 } 364 }
364 365
365 mnt = kern_mount_data(&dev_fs_type, "mode=0755"); 366 mnt = kern_mount_data(&dev_fs_type, options);
366 if (IS_ERR(mnt)) { 367 if (IS_ERR(mnt)) {
367 err = PTR_ERR(mnt); 368 err = PTR_ERR(mnt);
368 printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); 369 printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index d7d77d4a402c..bd025059711f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -311,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
311static ssize_t 311static ssize_t
312print_block_size(struct class *class, char *buf) 312print_block_size(struct class *class, char *buf)
313{ 313{
314 return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); 314 return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
315} 315}
316 316
317static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); 317static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 48adf80926a0..a5142bddef41 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -446,8 +446,8 @@ EXPORT_SYMBOL_GPL(dpm_resume_noirq);
446 446
447/** 447/**
448 * legacy_resume - Execute a legacy (bus or class) resume callback for device. 448 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
449 * dev: Device to resume. 449 * @dev: Device to resume.
450 * cb: Resume callback to execute. 450 * @cb: Resume callback to execute.
451 */ 451 */
452static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) 452static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
453{ 453{
@@ -711,8 +711,9 @@ EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
711 711
712/** 712/**
713 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 713 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
714 * dev: Device to suspend. 714 * @dev: Device to suspend.
715 * cb: Suspend callback to execute. 715 * @state: PM transition of the system being carried out.
716 * @cb: Suspend callback to execute.
716 */ 717 */
717static int legacy_suspend(struct device *dev, pm_message_t state, 718static int legacy_suspend(struct device *dev, pm_message_t state,
718 int (*cb)(struct device *dev, pm_message_t state)) 719 int (*cb)(struct device *dev, pm_message_t state))
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eb4fa1943944..ce1fa923c414 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7101,7 +7101,7 @@ static struct DAC960_privdata DAC960_BA_privdata = {
7101 7101
7102static struct DAC960_privdata DAC960_LP_privdata = { 7102static struct DAC960_privdata DAC960_LP_privdata = {
7103 .HardwareType = DAC960_LP_Controller, 7103 .HardwareType = DAC960_LP_Controller,
7104 .FirmwareType = DAC960_LP_Controller, 7104 .FirmwareType = DAC960_V2_Controller,
7105 .InterruptHandler = DAC960_LP_InterruptHandler, 7105 .InterruptHandler = DAC960_LP_InterruptHandler,
7106 .MemoryWindowSize = DAC960_LP_RegisterWindowSize, 7106 .MemoryWindowSize = DAC960_LP_RegisterWindowSize,
7107}; 7107};
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 13bb69d2abb3..64a223b0cc22 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -735,21 +735,6 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
735 part_stat_unlock(); 735 part_stat_unlock();
736} 736}
737 737
738/*
739 * Ensure we don't create aliases in VI caches
740 */
741static inline void
742killalias(struct bio *bio)
743{
744 struct bio_vec *bv;
745 int i;
746
747 if (bio_data_dir(bio) == READ)
748 __bio_for_each_segment(bv, bio, i, 0) {
749 flush_dcache_page(bv->bv_page);
750 }
751}
752
753void 738void
754aoecmd_ata_rsp(struct sk_buff *skb) 739aoecmd_ata_rsp(struct sk_buff *skb)
755{ 740{
@@ -871,7 +856,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
871 if (buf->flags & BUFFL_FAIL) 856 if (buf->flags & BUFFL_FAIL)
872 bio_endio(buf->bio, -EIO); 857 bio_endio(buf->bio, -EIO);
873 else { 858 else {
874 killalias(buf->bio); 859 bio_flush_dcache_pages(buf->bio);
875 bio_endio(buf->bio, 0); 860 bio_endio(buf->bio, 0);
876 } 861 }
877 mempool_free(buf, d->bufpool); 862 mempool_free(buf, d->bufpool);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594860d3..9291614ac6b7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
337 if (*pos > h->highest_lun) 337 if (*pos > h->highest_lun)
338 return 0; 338 return 0;
339 339
340 if (drv == NULL) /* it's possible for h->drv[] to have holes. */
341 return 0;
342
340 if (drv->heads == 0) 343 if (drv->heads == 0)
341 return 0; 344 return 0;
342 345
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index f4acd04ebeef..df0983787390 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected" 5comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
6 depends on !PROC_FS || !INET || !CONNECTOR 6 depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
7 7
8config BLK_DEV_DRBD 8config BLK_DEV_DRBD
9 tristate "DRBD Distributed Replicated Block Device support" 9 tristate "DRBD Distributed Replicated Block Device support"
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2312d782fe99..2bf3a6ef3684 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1275,7 +1275,7 @@ struct bm_extent {
1275#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32 1275#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
1276#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM 1276#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
1277#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM 1277#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
1278#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32 1278#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
1279#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32 1279#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
1280#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32 1280#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1281#else 1281#else
@@ -1371,10 +1371,9 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1371extern void drbd_suspend_io(struct drbd_conf *mdev); 1371extern void drbd_suspend_io(struct drbd_conf *mdev);
1372extern void drbd_resume_io(struct drbd_conf *mdev); 1372extern void drbd_resume_io(struct drbd_conf *mdev);
1373extern char *ppsize(char *buf, unsigned long long size); 1373extern char *ppsize(char *buf, unsigned long long size);
1374extern sector_t drbd_new_dev_size(struct drbd_conf *, 1374extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
1375 struct drbd_backing_dev *);
1376enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; 1375enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
1377extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local); 1376extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
1378extern void resync_after_online_grow(struct drbd_conf *); 1377extern void resync_after_online_grow(struct drbd_conf *);
1379extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); 1378extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
1380extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, 1379extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
@@ -1490,7 +1489,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
1490 1489
1491/* drbd_proc.c */ 1490/* drbd_proc.c */
1492extern struct proc_dir_entry *drbd_proc; 1491extern struct proc_dir_entry *drbd_proc;
1493extern struct file_operations drbd_proc_fops; 1492extern const struct file_operations drbd_proc_fops;
1494extern const char *drbd_conn_str(enum drbd_conns s); 1493extern const char *drbd_conn_str(enum drbd_conns s);
1495extern const char *drbd_role_str(enum drbd_role s); 1494extern const char *drbd_role_str(enum drbd_role s);
1496 1495
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 157d1e4343c2..ab871e00ffc5 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -27,7 +27,6 @@
27 */ 27 */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/drbd.h> 30#include <linux/drbd.h>
32#include <asm/uaccess.h> 31#include <asm/uaccess.h>
33#include <asm/types.h> 32#include <asm/types.h>
@@ -151,7 +150,7 @@ wait_queue_head_t drbd_pp_wait;
151 150
152DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); 151DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
153 152
154static struct block_device_operations drbd_ops = { 153static const struct block_device_operations drbd_ops = {
155 .owner = THIS_MODULE, 154 .owner = THIS_MODULE,
156 .open = drbd_open, 155 .open = drbd_open,
157 .release = drbd_release, 156 .release = drbd_release,
@@ -1299,6 +1298,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1299 dev_err(DEV, "Sending state in drbd_io_error() failed\n"); 1298 dev_err(DEV, "Sending state in drbd_io_error() failed\n");
1300 } 1299 }
1301 1300
1301 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1302 lc_destroy(mdev->resync); 1302 lc_destroy(mdev->resync);
1303 mdev->resync = NULL; 1303 mdev->resync = NULL;
1304 lc_destroy(mdev->act_log); 1304 lc_destroy(mdev->act_log);
@@ -2973,7 +2973,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
2973 goto out_no_q; 2973 goto out_no_q;
2974 mdev->rq_queue = q; 2974 mdev->rq_queue = q;
2975 q->queuedata = mdev; 2975 q->queuedata = mdev;
2976 blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
2977 2976
2978 disk = alloc_disk(1); 2977 disk = alloc_disk(1);
2979 if (!disk) 2978 if (!disk)
@@ -2997,6 +2996,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
2997 q->backing_dev_info.congested_data = mdev; 2996 q->backing_dev_info.congested_data = mdev;
2998 2997
2999 blk_queue_make_request(q, drbd_make_request_26); 2998 blk_queue_make_request(q, drbd_make_request_26);
2999 blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
3000 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 3000 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3001 blk_queue_merge_bvec(q, drbd_merge_bvec); 3001 blk_queue_merge_bvec(q, drbd_merge_bvec);
3002 q->queue_lock = &mdev->req_lock; /* needed since we use */ 3002 q->queue_lock = &mdev->req_lock; /* needed since we use */
@@ -3623,7 +3623,7 @@ _drbd_fault_random(struct fault_random_state *rsp)
3623{ 3623{
3624 long refresh; 3624 long refresh;
3625 3625
3626 if (--rsp->count < 0) { 3626 if (!rsp->count--) {
3627 get_random_bytes(&refresh, sizeof(refresh)); 3627 get_random_bytes(&refresh, sizeof(refresh));
3628 rsp->state += refresh; 3628 rsp->state += refresh;
3629 rsp->count = FAULT_RANDOM_REFRESH; 3629 rsp->count = FAULT_RANDOM_REFRESH;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 4e0726aa53b0..1292e0620663 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
510 * Returns 0 on success, negative return values indicate errors. 510 * Returns 0 on success, negative return values indicate errors.
511 * You should call drbd_md_sync() after calling this function. 511 * You should call drbd_md_sync() after calling this function.
512 */ 512 */
513enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local) 513enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
514{ 514{
515 sector_t prev_first_sect, prev_size; /* previous meta location */ 515 sector_t prev_first_sect, prev_size; /* previous meta location */
516 sector_t la_size; 516 sector_t la_size;
@@ -541,7 +541,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_ho
541 /* TODO: should only be some assert here, not (re)init... */ 541 /* TODO: should only be some assert here, not (re)init... */
542 drbd_md_set_sector_offsets(mdev, mdev->ldev); 542 drbd_md_set_sector_offsets(mdev, mdev->ldev);
543 543
544 size = drbd_new_dev_size(mdev, mdev->ldev); 544 size = drbd_new_dev_size(mdev, mdev->ldev, force);
545 545
546 if (drbd_get_capacity(mdev->this_bdev) != size || 546 if (drbd_get_capacity(mdev->this_bdev) != size ||
547 drbd_bm_capacity(mdev) != size) { 547 drbd_bm_capacity(mdev) != size) {
@@ -596,7 +596,7 @@ out:
596} 596}
597 597
598sector_t 598sector_t
599drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) 599drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
600{ 600{
601 sector_t p_size = mdev->p_size; /* partner's disk size. */ 601 sector_t p_size = mdev->p_size; /* partner's disk size. */
602 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ 602 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
@@ -606,6 +606,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
606 606
607 m_size = drbd_get_max_capacity(bdev); 607 m_size = drbd_get_max_capacity(bdev);
608 608
609 if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
610 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
611 p_size = m_size;
612 }
613
609 if (p_size && m_size) { 614 if (p_size && m_size) {
610 size = min_t(sector_t, p_size, m_size); 615 size = min_t(sector_t, p_size, m_size);
611 } else { 616 } else {
@@ -965,7 +970,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
965 970
966 /* Prevent shrinking of consistent devices ! */ 971 /* Prevent shrinking of consistent devices ! */
967 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 972 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
968 drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) { 973 drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
969 dev_warn(DEV, "refusing to truncate a consistent device\n"); 974 dev_warn(DEV, "refusing to truncate a consistent device\n");
970 retcode = ERR_DISK_TO_SMALL; 975 retcode = ERR_DISK_TO_SMALL;
971 goto force_diskless_dec; 976 goto force_diskless_dec;
@@ -1052,7 +1057,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1052 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1057 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1053 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1058 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1054 1059
1055 dd = drbd_determin_dev_size(mdev); 1060 dd = drbd_determin_dev_size(mdev, 0);
1056 if (dd == dev_size_error) { 1061 if (dd == dev_size_error) {
1057 retcode = ERR_NOMEM_BITMAP; 1062 retcode = ERR_NOMEM_BITMAP;
1058 goto force_diskless_dec; 1063 goto force_diskless_dec;
@@ -1271,7 +1276,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1271 goto fail; 1276 goto fail;
1272 } 1277 }
1273 1278
1274 if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) { 1279 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
1275 retcode = ERR_AUTH_ALG_ND; 1280 retcode = ERR_AUTH_ALG_ND;
1276 goto fail; 1281 goto fail;
1277 } 1282 }
@@ -1504,7 +1509,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1504 } 1509 }
1505 1510
1506 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1511 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
1507 dd = drbd_determin_dev_size(mdev); 1512 dd = drbd_determin_dev_size(mdev, rs.resize_force);
1508 drbd_md_sync(mdev); 1513 drbd_md_sync(mdev);
1509 put_ldev(mdev); 1514 put_ldev(mdev);
1510 if (dd == dev_size_error) { 1515 if (dd == dev_size_error) {
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index bdd0b4943b10..df8ad9660d8f 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -38,7 +38,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file);
38 38
39 39
40struct proc_dir_entry *drbd_proc; 40struct proc_dir_entry *drbd_proc;
41struct file_operations drbd_proc_fops = { 41const struct file_operations drbd_proc_fops = {
42 .owner = THIS_MODULE, 42 .owner = THIS_MODULE,
43 .open = drbd_proc_open, 43 .open = drbd_proc_open,
44 .read = seq_read, 44 .read = seq_read,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c548f24f54a1..d065c646b35a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -28,7 +28,6 @@
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <net/sock.h> 29#include <net/sock.h>
30 30
31#include <linux/version.h>
32#include <linux/drbd.h> 31#include <linux/drbd.h>
33#include <linux/fs.h> 32#include <linux/fs.h>
34#include <linux/file.h> 33#include <linux/file.h>
@@ -879,9 +878,13 @@ retry:
879 878
880 if (mdev->cram_hmac_tfm) { 879 if (mdev->cram_hmac_tfm) {
881 /* drbd_request_state(mdev, NS(conn, WFAuth)); */ 880 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
882 if (!drbd_do_auth(mdev)) { 881 switch (drbd_do_auth(mdev)) {
882 case -1:
883 dev_err(DEV, "Authentication of peer failed\n"); 883 dev_err(DEV, "Authentication of peer failed\n");
884 return -1; 884 return -1;
885 case 0:
886 dev_err(DEV, "Authentication of peer failed, trying again.\n");
887 return 0;
885 } 888 }
886 } 889 }
887 890
@@ -1202,10 +1205,11 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1202 1205
1203 case WO_bdev_flush: 1206 case WO_bdev_flush:
1204 case WO_drain_io: 1207 case WO_drain_io:
1205 D_ASSERT(rv == FE_STILL_LIVE); 1208 if (rv == FE_STILL_LIVE) {
1206 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); 1209 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1207 drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1210 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1208 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1211 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1212 }
1209 if (rv == FE_RECYCLED) 1213 if (rv == FE_RECYCLED)
1210 return TRUE; 1214 return TRUE;
1211 1215
@@ -1220,7 +1224,7 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1220 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 1224 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1221 if (!epoch) { 1225 if (!epoch) {
1222 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); 1226 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1223 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); 1227 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1224 drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1228 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1225 if (issue_flush) { 1229 if (issue_flush) {
1226 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1230 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
@@ -2866,7 +2870,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2866 2870
2867 /* Never shrink a device with usable data during connect. 2871 /* Never shrink a device with usable data during connect.
2868 But allow online shrinking if we are connected. */ 2872 But allow online shrinking if we are connected. */
2869 if (drbd_new_dev_size(mdev, mdev->ldev) < 2873 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2870 drbd_get_capacity(mdev->this_bdev) && 2874 drbd_get_capacity(mdev->this_bdev) &&
2871 mdev->state.disk >= D_OUTDATED && 2875 mdev->state.disk >= D_OUTDATED &&
2872 mdev->state.conn < C_CONNECTED) { 2876 mdev->state.conn < C_CONNECTED) {
@@ -2881,7 +2885,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2881#undef min_not_zero 2885#undef min_not_zero
2882 2886
2883 if (get_ldev(mdev)) { 2887 if (get_ldev(mdev)) {
2884 dd = drbd_determin_dev_size(mdev); 2888 dd = drbd_determin_dev_size(mdev, 0);
2885 put_ldev(mdev); 2889 put_ldev(mdev);
2886 if (dd == dev_size_error) 2890 if (dd == dev_size_error)
2887 return FALSE; 2891 return FALSE;
@@ -3831,10 +3835,17 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3831{ 3835{
3832 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n"); 3836 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
3833 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); 3837 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3834 return 0; 3838 return -1;
3835} 3839}
3836#else 3840#else
3837#define CHALLENGE_LEN 64 3841#define CHALLENGE_LEN 64
3842
3843/* Return value:
3844 1 - auth succeeded,
3845 0 - failed, try again (network error),
3846 -1 - auth failed, don't try again.
3847*/
3848
3838static int drbd_do_auth(struct drbd_conf *mdev) 3849static int drbd_do_auth(struct drbd_conf *mdev)
3839{ 3850{
3840 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ 3851 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
@@ -3855,7 +3866,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3855 (u8 *)mdev->net_conf->shared_secret, key_len); 3866 (u8 *)mdev->net_conf->shared_secret, key_len);
3856 if (rv) { 3867 if (rv) {
3857 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv); 3868 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
3858 rv = 0; 3869 rv = -1;
3859 goto fail; 3870 goto fail;
3860 } 3871 }
3861 3872
@@ -3878,14 +3889,14 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3878 3889
3879 if (p.length > CHALLENGE_LEN*2) { 3890 if (p.length > CHALLENGE_LEN*2) {
3880 dev_err(DEV, "expected AuthChallenge payload too big.\n"); 3891 dev_err(DEV, "expected AuthChallenge payload too big.\n");
3881 rv = 0; 3892 rv = -1;
3882 goto fail; 3893 goto fail;
3883 } 3894 }
3884 3895
3885 peers_ch = kmalloc(p.length, GFP_NOIO); 3896 peers_ch = kmalloc(p.length, GFP_NOIO);
3886 if (peers_ch == NULL) { 3897 if (peers_ch == NULL) {
3887 dev_err(DEV, "kmalloc of peers_ch failed\n"); 3898 dev_err(DEV, "kmalloc of peers_ch failed\n");
3888 rv = 0; 3899 rv = -1;
3889 goto fail; 3900 goto fail;
3890 } 3901 }
3891 3902
@@ -3901,7 +3912,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3901 response = kmalloc(resp_size, GFP_NOIO); 3912 response = kmalloc(resp_size, GFP_NOIO);
3902 if (response == NULL) { 3913 if (response == NULL) {
3903 dev_err(DEV, "kmalloc of response failed\n"); 3914 dev_err(DEV, "kmalloc of response failed\n");
3904 rv = 0; 3915 rv = -1;
3905 goto fail; 3916 goto fail;
3906 } 3917 }
3907 3918
@@ -3911,7 +3922,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3911 rv = crypto_hash_digest(&desc, &sg, sg.length, response); 3922 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
3912 if (rv) { 3923 if (rv) {
3913 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 3924 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3914 rv = 0; 3925 rv = -1;
3915 goto fail; 3926 goto fail;
3916 } 3927 }
3917 3928
@@ -3945,9 +3956,9 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3945 } 3956 }
3946 3957
3947 right_response = kmalloc(resp_size, GFP_NOIO); 3958 right_response = kmalloc(resp_size, GFP_NOIO);
3948 if (response == NULL) { 3959 if (right_response == NULL) {
3949 dev_err(DEV, "kmalloc of right_response failed\n"); 3960 dev_err(DEV, "kmalloc of right_response failed\n");
3950 rv = 0; 3961 rv = -1;
3951 goto fail; 3962 goto fail;
3952 } 3963 }
3953 3964
@@ -3956,7 +3967,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3956 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 3967 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
3957 if (rv) { 3968 if (rv) {
3958 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 3969 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3959 rv = 0; 3970 rv = -1;
3960 goto fail; 3971 goto fail;
3961 } 3972 }
3962 3973
@@ -3965,6 +3976,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
3965 if (rv) 3976 if (rv)
3966 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n", 3977 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
3967 resp_size, mdev->net_conf->cram_hmac_alg); 3978 resp_size, mdev->net_conf->cram_hmac_alg);
3979 else
3980 rv = -1;
3968 3981
3969 fail: 3982 fail:
3970 kfree(peers_ch); 3983 kfree(peers_ch);
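The receiver changes turn drbd_do_auth() into a tri-state result: 1 for success, 0 for a transient (network) failure that warrants another handshake attempt, and -1 for a genuine authentication failure that must not be retried. A small standalone sketch of consuming that convention (do_auth() below is a stand-in, not the real function):

#include <stdio.h>

static int do_auth(int attempt)
{
        if (attempt < 2)
                return 0;       /* pretend the network hiccupped */
        return 1;               /* then succeed */
}

int main(void)
{
        for (int attempt = 0; attempt < 5; attempt++) {
                switch (do_auth(attempt)) {
                case -1:
                        printf("authentication of peer failed\n");
                        return 1;               /* don't retry */
                case 0:
                        printf("auth failed, trying again\n");
                        continue;               /* restart the handshake */
                default:
                        printf("peer authenticated\n");
                        return 0;
                }
        }
        return 1;
}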
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ed8796f1112d..b453c2bca3be 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -24,7 +24,6 @@
24 */ 24 */
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/version.h>
28#include <linux/drbd.h> 27#include <linux/drbd.h>
29#include <linux/sched.h> 28#include <linux/sched.h>
30#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
@@ -34,7 +33,6 @@
34#include <linux/mm_inline.h> 33#include <linux/mm_inline.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <linux/random.h> 35#include <linux/random.h>
37#include <linux/mm.h>
38#include <linux/string.h> 36#include <linux/string.h>
39#include <linux/scatterlist.h> 37#include <linux/scatterlist.h>
40 38
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e0339aaa1815..02b2583df7fc 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -860,7 +860,7 @@ static int mg_probe(struct platform_device *plat_dev)
860 err = -EINVAL; 860 err = -EINVAL;
861 goto probe_err_2; 861 goto probe_err_2;
862 } 862 }
863 host->dev_base = ioremap(rsc->start , rsc->end + 1); 863 host->dev_base = ioremap(rsc->start, resource_size(rsc));
864 if (!host->dev_base) { 864 if (!host->dev_base) {
865 printk(KERN_ERR "%s:%d ioremap fail\n", 865 printk(KERN_ERR "%s:%d ioremap fail\n",
866 __func__, __LINE__); 866 __func__, __LINE__);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2ddf03ae034e..68b5957f107c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
322 pkt_kobj_remove(pd->kobj_stat); 322 pkt_kobj_remove(pd->kobj_stat);
323 pkt_kobj_remove(pd->kobj_wqueue); 323 pkt_kobj_remove(pd->kobj_wqueue);
324 if (class_pktcdvd) 324 if (class_pktcdvd)
325 device_destroy(class_pktcdvd, pd->pkt_dev); 325 device_unregister(pd->dev);
326} 326}
327 327
328 328
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 652367aa6546..058fbccf2f52 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -195,5 +195,16 @@ config BT_MRVL_SDIO
195 Say Y here to compile support for Marvell BT-over-SDIO driver 195 Say Y here to compile support for Marvell BT-over-SDIO driver
196 into the kernel or say M to compile it as module. 196 into the kernel or say M to compile it as module.
197 197
198endmenu 198config BT_ATH3K
199 tristate "Atheros firmware download driver"
200 depends on BT_HCIBTUSB
201 select FW_LOADER
202 help
203 Bluetooth firmware download driver.
204 This driver loads the firmware into the Atheros Bluetooth
205 chipset.
199 206
207 Say Y here to compile support for "Atheros firmware download driver"
208 into the kernel or say M to compile it as module (ath3k).
209
210endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b3f57d2d4eb0..7e5aed598121 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o
15obj-$(CONFIG_BT_HCIBTUSB) += btusb.o 15obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
16obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o 16obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
17 17
18obj-$(CONFIG_BT_ATH3K) += ath3k.o
18obj-$(CONFIG_BT_MRVL) += btmrvl.o 19obj-$(CONFIG_BT_MRVL) += btmrvl.o
19obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o 20obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
20 21
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
new file mode 100644
index 000000000000..add9485ca5b6
--- /dev/null
+++ b/drivers/bluetooth/ath3k.c
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 */
19
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/errno.h>
27#include <linux/device.h>
28#include <linux/firmware.h>
29#include <linux/usb.h>
30#include <net/bluetooth/bluetooth.h>
31
32#define VERSION "1.0"
33
34
35static struct usb_device_id ath3k_table[] = {
36 /* Atheros AR3011 */
37 { USB_DEVICE(0x0CF3, 0x3000) },
38 { } /* Terminating entry */
39};
40
41MODULE_DEVICE_TABLE(usb, ath3k_table);
42
43#define USB_REQ_DFU_DNLOAD 1
44#define BULK_SIZE 4096
45
46struct ath3k_data {
47 struct usb_device *udev;
48 u8 *fw_data;
49 u32 fw_size;
50 u32 fw_sent;
51};
52
53static int ath3k_load_firmware(struct ath3k_data *data,
54 unsigned char *firmware,
55 int count)
56{
57 u8 *send_buf;
58 int err, pipe, len, size, sent = 0;
59
60 BT_DBG("ath3k %p udev %p", data, data->udev);
61
62 pipe = usb_sndctrlpipe(data->udev, 0);
63
64 if ((usb_control_msg(data->udev, pipe,
65 USB_REQ_DFU_DNLOAD,
66 USB_TYPE_VENDOR, 0, 0,
67 firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
68 BT_ERR("Can't change to loading configuration err");
69 return -EBUSY;
70 }
71 sent += 20;
72 count -= 20;
73
74 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
75 if (!send_buf) {
76 BT_ERR("Can't allocate memory chunk for firmware");
77 return -ENOMEM;
78 }
79
80 while (count) {
81 size = min_t(uint, count, BULK_SIZE);
82 pipe = usb_sndbulkpipe(data->udev, 0x02);
83 memcpy(send_buf, firmware + sent, size);
84
85 err = usb_bulk_msg(data->udev, pipe, send_buf, size,
86 &len, 3000);
87
88 if (err || (len != size)) {
89 BT_ERR("Error in firmware loading err = %d,"
90 "len = %d, size = %d", err, len, size);
91 goto error;
92 }
93
94 sent += size;
95 count -= size;
96 }
97
98 kfree(send_buf);
99 return 0;
100
101error:
102 kfree(send_buf);
103 return err;
104}
105
106static int ath3k_probe(struct usb_interface *intf,
107 const struct usb_device_id *id)
108{
109 const struct firmware *firmware;
110 struct usb_device *udev = interface_to_usbdev(intf);
111 struct ath3k_data *data;
112 int size;
113
114 BT_DBG("intf %p id %p", intf, id);
115
116 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
117 return -ENODEV;
118
119 data = kzalloc(sizeof(*data), GFP_KERNEL);
120 if (!data)
121 return -ENOMEM;
122
123 data->udev = udev;
124
125 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
126 kfree(data);
127 return -EIO;
128 }
129
130 size = max_t(uint, firmware->size, 4096);
131 data->fw_data = kmalloc(size, GFP_KERNEL);
132 if (!data->fw_data) {
133 release_firmware(firmware);
134 kfree(data);
135 return -ENOMEM;
136 }
137
138 memcpy(data->fw_data, firmware->data, firmware->size);
139 data->fw_size = firmware->size;
140 data->fw_sent = 0;
141 release_firmware(firmware);
142
143 usb_set_intfdata(intf, data);
144 if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
145 usb_set_intfdata(intf, NULL);
146 return -EIO;
147 }
148
149 return 0;
150}
151
152static void ath3k_disconnect(struct usb_interface *intf)
153{
154 struct ath3k_data *data = usb_get_intfdata(intf);
155
156 BT_DBG("ath3k_disconnect intf %p", intf);
157
158 kfree(data->fw_data);
159 kfree(data);
160}
161
162static struct usb_driver ath3k_driver = {
163 .name = "ath3k",
164 .probe = ath3k_probe,
165 .disconnect = ath3k_disconnect,
166 .id_table = ath3k_table,
167};
168
169static int __init ath3k_init(void)
170{
171 BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
172 return usb_register(&ath3k_driver);
173}
174
175static void __exit ath3k_exit(void)
176{
177 usb_deregister(&ath3k_driver);
178}
179
180module_init(ath3k_init);
181module_exit(ath3k_exit);
182
183MODULE_AUTHOR("Atheros Communications");
184MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
185MODULE_VERSION(VERSION);
186MODULE_LICENSE("GPL");
187MODULE_FIRMWARE("ath3k-1.fw");
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 2acdc605cb4b..c2cf81144715 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -503,7 +503,9 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst)
503 unsigned int iobase; 503 unsigned int iobase;
504 unsigned char reg; 504 unsigned char reg;
505 505
506 BUG_ON(!info->hdev); 506 if (!info || !info->hdev)
507 /* our irq handler is shared */
508 return IRQ_NONE;
507 509
508 if (!test_bit(CARD_READY, &(info->hw_state))) 510 if (!test_bit(CARD_READY, &(info->hw_state)))
509 return IRQ_HANDLED; 511 return IRQ_HANDLED;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index d814a2755ccb..9f5926aaf57f 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -345,7 +345,9 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
345 int iir; 345 int iir;
346 irqreturn_t r = IRQ_NONE; 346 irqreturn_t r = IRQ_NONE;
347 347
348 BUG_ON(!info->hdev); 348 if (!info || !info->hdev)
349 /* our irq handler is shared */
350 return IRQ_NONE;
349 351
350 iobase = info->p_dev->io.BasePort1; 352 iobase = info->p_dev->io.BasePort1;
351 353
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
808 808
809exit: 809exit:
810 sdio_release_host(card->func); 810 sdio_release_host(card->func);
811 kfree(tmpbuf);
811 812
812 return ret; 813 return ret;
813} 814}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index d339464dc15e..91c523099804 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -295,7 +295,9 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
295 int iir, lsr; 295 int iir, lsr;
296 irqreturn_t r = IRQ_NONE; 296 irqreturn_t r = IRQ_NONE;
297 297
298 BUG_ON(!info->hdev); 298 if (!info || !info->hdev)
299 /* our irq handler is shared */
300 return IRQ_NONE;
299 301
300 iobase = info->p_dev->io.BasePort1; 302 iobase = info->p_dev->io.BasePort1;
301 303
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 4f02a6f3c980..697591941e17 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -299,7 +299,9 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
299 int iir, lsr; 299 int iir, lsr;
300 irqreturn_t r = IRQ_NONE; 300 irqreturn_t r = IRQ_NONE;
301 301
302 BUG_ON(!info->hdev); 302 if (!info || !info->hdev)
303 /* our irq handler is shared */
304 return IRQ_NONE;
303 305
304 iobase = info->p_dev->io.BasePort1; 306 iobase = info->p_dev->io.BasePort1;
305 307
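The four PCMCIA hunks above (bluecard_cs, bt3c_cs, btuart_cs, dtl1_cs) make the same change: on a shared interrupt line, a handler that cannot yet attribute the interrupt to its own device should return IRQ_NONE rather than BUG out. A minimal user-space sketch of that contract follows; the struct, the dispatcher loop and the names are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <stddef.h>

typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;

struct card { const char *name; int ready; };

/* Handler for one device on a shared line: decline if our state is
 * not set up yet, so another sharer gets a chance to claim the event. */
static irqreturn_t card_interrupt(void *dev_inst)
{
	struct card *info = dev_inst;

	if (!info || !info->ready)
		return IRQ_NONE;	/* not ours (or not ready): decline */

	printf("%s: handled\n", info->name);
	return IRQ_HANDLED;
}

int main(void)
{
	struct card a = { "cardA", 0 }, b = { "cardB", 1 };
	void *sharers[] = { &a, &b };

	/* The "interrupt controller": offer the event to every sharer. */
	for (size_t i = 0; i < sizeof(sharers) / sizeof(sharers[0]); i++)
		if (card_interrupt(sharers[i]) == IRQ_HANDLED)
			break;
	return 0;
}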
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 31be3ac2e21b..e023682be2c4 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -669,7 +669,7 @@ config VIRTIO_CONSOLE
669 669
670config HVCS 670config HVCS
671 tristate "IBM Hypervisor Virtual Console Server support" 671 tristate "IBM Hypervisor Virtual Console Server support"
672 depends on PPC_PSERIES 672 depends on PPC_PSERIES && HVC_CONSOLE
673 help 673 help
674 Partitionable IBM Power5 ppc64 machines allow hosting of 674 Partitionable IBM Power5 ppc64 machines allow hosting of
675 firmware virtual consoles from one Linux partition by 675 firmware virtual consoles from one Linux partition by
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 2fb2e6cc322a..fd50ead59c79 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -728,6 +728,7 @@ int __init agp_amd64_init(void)
728 728
729 if (agp_off) 729 if (agp_off)
730 return -EINVAL; 730 return -EINVAL;
731
731 err = pci_register_driver(&agp_amd64_pci_driver); 732 err = pci_register_driver(&agp_amd64_pci_driver);
732 if (err < 0) 733 if (err < 0)
733 return err; 734 return err;
@@ -764,19 +765,28 @@ int __init agp_amd64_init(void)
764 return err; 765 return err;
765} 766}
766 767
768static int __init agp_amd64_mod_init(void)
769{
770#ifndef MODULE
771 if (gart_iommu_aperture)
772 return agp_bridges_found ? 0 : -ENODEV;
773#endif
774 return agp_amd64_init();
775}
776
767static void __exit agp_amd64_cleanup(void) 777static void __exit agp_amd64_cleanup(void)
768{ 778{
779#ifndef MODULE
780 if (gart_iommu_aperture)
781 return;
782#endif
769 if (aperture_resource) 783 if (aperture_resource)
770 release_resource(aperture_resource); 784 release_resource(aperture_resource);
771 pci_unregister_driver(&agp_amd64_pci_driver); 785 pci_unregister_driver(&agp_amd64_pci_driver);
772} 786}
773 787
774/* On AMD64 the PCI driver needs to initialize this driver early 788module_init(agp_amd64_mod_init);
775 for the IOMMU, so it has to be called via a backdoor. */
776#ifndef CONFIG_GART_IOMMU
777module_init(agp_amd64_init);
778module_exit(agp_amd64_cleanup); 789module_exit(agp_amd64_cleanup);
779#endif
780 790
781MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); 791MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
782module_param(agp_try_unsupported, bool, 0); 792module_param(agp_try_unsupported, bool, 0);
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index a56ca080e108..c3ab46da51a3 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -285,18 +285,22 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
285{ 285{
286 int error; 286 int error;
287 287
288 if (agp_off) 288 if (agp_off) {
289 return -ENODEV; 289 error = -ENODEV;
290 goto err_put_bridge;
291 }
290 292
291 if (!bridge->dev) { 293 if (!bridge->dev) {
292 printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n"); 294 printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
293 return -EINVAL; 295 error = -EINVAL;
296 goto err_put_bridge;
294 } 297 }
295 298
296 /* Grab reference on the chipset driver. */ 299 /* Grab reference on the chipset driver. */
297 if (!try_module_get(bridge->driver->owner)) { 300 if (!try_module_get(bridge->driver->owner)) {
298 dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); 301 dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
299 return -EINVAL; 302 error = -EINVAL;
303 goto err_put_bridge;
300 } 304 }
301 305
302 error = agp_backend_initialize(bridge); 306 error = agp_backend_initialize(bridge);
@@ -326,6 +330,7 @@ frontend_err:
326 agp_backend_cleanup(bridge); 330 agp_backend_cleanup(bridge);
327err_out: 331err_out:
328 module_put(bridge->driver->owner); 332 module_put(bridge->driver->owner);
333err_put_bridge:
329 agp_put_bridge(bridge); 334 agp_put_bridge(bridge);
330 return error; 335 return error;
331} 336}
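The agp_add_bridge() change routes every early failure through err_put_bridge so the reference handed in by the caller is dropped exactly once. A standalone sketch of that single-exit cleanup pattern, with a toy refcount standing in for agp_put_bridge():

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct bridge { int refs; };

static struct bridge *bridge_get(struct bridge *b) { b->refs++; return b; }
static void bridge_put(struct bridge *b)
{
	if (--b->refs == 0)
		free(b);
}

/* Every failure exits through one label, so the reference taken by the
 * caller is released on all error paths, mirroring err_put_bridge. */
static int add_bridge(struct bridge *b, int agp_off, int have_dev)
{
	int error = 0;

	if (agp_off) {
		error = -ENODEV;
		goto err_put_bridge;
	}
	if (!have_dev) {
		error = -EINVAL;
		goto err_put_bridge;
	}
	printf("bridge registered\n");
	return 0;

err_put_bridge:
	bridge_put(b);
	return error;
}

int main(void)
{
	struct bridge *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	bridge_get(b);				/* reference handed to add_bridge() */
	return add_bridge(b, 0, 0) ? 1 : 0;	/* fails; reference still released */
}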
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 9047b2714653..58752b70efea 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -488,9 +488,8 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
488 handle = obj; 488 handle = obj;
489 do { 489 do {
490 status = acpi_get_object_info(handle, &info); 490 status = acpi_get_object_info(handle, &info);
491 if (ACPI_SUCCESS(status)) { 491 if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
492 /* TBD check _CID also */ 492 /* TBD check _CID also */
493 info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
494 match = (strcmp(info->hardware_id.string, "HWP0001") == 0); 493 match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
495 kfree(info); 494 kfree(info);
496 if (match) { 495 if (match) {
@@ -509,6 +508,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
509 handle = parent; 508 handle = parent;
510 } while (ACPI_SUCCESS(status)); 509 } while (ACPI_SUCCESS(status));
511 510
511 if (ACPI_FAILURE(status))
512 return AE_OK; /* found no enclosing IOC */
513
512 if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) 514 if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
513 return AE_OK; 515 return AE_OK;
514 516
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 30c36ac2cd00..3999a5f25f38 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2460,10 +2460,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2460 &bridge->mode); 2460 &bridge->mode);
2461 } 2461 }
2462 2462
2463 if (bridge->driver->mask_memory == intel_i965_mask_memory) 2463 if (bridge->driver->mask_memory == intel_i965_mask_memory) {
2464 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) 2464 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
2465 dev_err(&intel_private.pcidev->dev, 2465 dev_err(&intel_private.pcidev->dev,
2466 "set gfx device dma mask 36bit failed!\n"); 2466 "set gfx device dma mask 36bit failed!\n");
2467 else
2468 pci_set_consistent_dma_mask(intel_private.pcidev,
2469 DMA_BIT_MASK(36));
2470 }
2467 2471
2468 pci_set_drvdata(pdev, bridge); 2472 pci_set_drvdata(pdev, bridge);
2469 return agp_add_bridge(bridge); 2473 return agp_add_bridge(bridge);
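The intel-agp hunk sets the coherent DMA mask alongside the streaming one; DMA_BIT_MASK(36) is simply the low 36 bits. A standalone sketch of the mask arithmetic (the helper mirrors the kernel macro but is redefined locally, and the sample address is made up):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Same shape as the kernel's DMA_BIT_MASK(): n low bits set,
 * with n >= 64 handled specially to avoid an undefined shift. */
static uint64_t dma_bit_mask(unsigned int n)
{
	return n >= 64 ? ~UINT64_C(0) : (UINT64_C(1) << n) - 1;
}

int main(void)
{
	uint64_t mask36 = dma_bit_mask(36);
	uint64_t addr   = UINT64_C(0x9) << 33;	/* 72 GiB: beyond 36-bit reach */

	printf("36-bit mask: 0x%" PRIx64 "\n", mask36);		/* 0xfffffffff */
	printf("addr 0x%" PRIx64 ": %s\n", addr,
	       (addr & ~mask36) ? "needs bounce/IOMMU" : "directly addressable");
	return 0;
}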
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e989f67bb61f..3d9c61e5acbf 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -158,10 +158,11 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
158 goto out; 158 goto out;
159 } 159 }
160 } 160 }
161out_unlock:
162 mutex_unlock(&rng_mutex);
163out: 161out:
164 return ret ? : err; 162 return ret ? : err;
163out_unlock:
164 mutex_unlock(&rng_mutex);
165 goto out;
165} 166}
166 167
167 168
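The core.c hunk reorders the exit labels so that paths still holding rng_mutex unlock before returning, while the plain exit stays lock-free. A user-space sketch of the same label layout, with a pthread mutex standing in for rng_mutex and made-up byte counts:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t rng_mutex = PTHREAD_MUTEX_INITIALIZER;

static int dev_read(int want, int *got)
{
	int ret = 0, err = 0;

	*got = 0;
	if (want <= 0)
		goto out;			/* never took the lock */

	if (pthread_mutex_lock(&rng_mutex)) {
		err = -1;
		goto out;
	}
	ret = *got = want;			/* pretend we produced data */
	goto out_unlock;			/* locked paths leave through here */

out:
	return ret ? ret : err;
out_unlock:
	pthread_mutex_unlock(&rng_mutex);
	goto out;
}

int main(void)
{
	int got;

	printf("read -> %d, got %d bytes\n", dev_read(16, &got), got);
	return 0;
}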
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index bdaef8e94021..64fe0a793efd 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -114,7 +114,7 @@ static struct virtio_device_id id_table[] = {
114 { 0 }, 114 { 0 },
115}; 115};
116 116
117static struct virtio_driver virtio_rng = { 117static struct virtio_driver virtio_rng_driver = {
118 .driver.name = KBUILD_MODNAME, 118 .driver.name = KBUILD_MODNAME,
119 .driver.owner = THIS_MODULE, 119 .driver.owner = THIS_MODULE,
120 .id_table = id_table, 120 .id_table = id_table,
@@ -124,12 +124,12 @@ static struct virtio_driver virtio_rng = {
124 124
125static int __init init(void) 125static int __init init(void)
126{ 126{
127 return register_virtio_driver(&virtio_rng); 127 return register_virtio_driver(&virtio_rng_driver);
128} 128}
129 129
130static void __exit fini(void) 130static void __exit fini(void)
131{ 131{
132 unregister_virtio_driver(&virtio_rng); 132 unregister_virtio_driver(&virtio_rng_driver);
133} 133}
134module_init(init); 134module_init(init);
135module_exit(fini); 135module_exit(fini);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 679cd08b80b4..176f1751237f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -3204,7 +3204,7 @@ static __devinit int init_ipmi_si(void)
3204#ifdef CONFIG_ACPI 3204#ifdef CONFIG_ACPI
3205 spmi_find_bmc(); 3205 spmi_find_bmc();
3206#endif 3206#endif
3207#ifdef CONFIG_PNP 3207#ifdef CONFIG_ACPI
3208 pnp_register_driver(&ipmi_pnp_driver); 3208 pnp_register_driver(&ipmi_pnp_driver);
3209#endif 3209#endif
3210 3210
@@ -3330,7 +3330,7 @@ static __exit void cleanup_ipmi_si(void)
3330#ifdef CONFIG_PCI 3330#ifdef CONFIG_PCI
3331 pci_unregister_driver(&ipmi_pci_driver); 3331 pci_unregister_driver(&ipmi_pci_driver);
3332#endif 3332#endif
3333#ifdef CONFIG_PNP 3333#ifdef CONFIG_ACPI
3334 pnp_unregister_driver(&ipmi_pnp_driver); 3334 pnp_unregister_driver(&ipmi_pnp_driver);
3335#endif 3335#endif
3336 3336
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index be832b6f8279..48788db4e280 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -395,6 +395,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
395 unsigned long p = *ppos; 395 unsigned long p = *ppos;
396 ssize_t low_count, read, sz; 396 ssize_t low_count, read, sz;
397 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ 397 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
398 int err = 0;
398 399
399 read = 0; 400 read = 0;
400 if (p < (unsigned long) high_memory) { 401 if (p < (unsigned long) high_memory) {
@@ -441,12 +442,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
441 return -ENOMEM; 442 return -ENOMEM;
442 while (count > 0) { 443 while (count > 0) {
443 sz = size_inside_page(p, count); 444 sz = size_inside_page(p, count);
445 if (!is_vmalloc_or_module_addr((void *)p)) {
446 err = -ENXIO;
447 break;
448 }
444 sz = vread(kbuf, (char *)p, sz); 449 sz = vread(kbuf, (char *)p, sz);
445 if (!sz) 450 if (!sz)
446 break; 451 break;
447 if (copy_to_user(buf, kbuf, sz)) { 452 if (copy_to_user(buf, kbuf, sz)) {
448 free_page((unsigned long)kbuf); 453 err = -EFAULT;
449 return -EFAULT; 454 break;
450 } 455 }
451 count -= sz; 456 count -= sz;
452 buf += sz; 457 buf += sz;
@@ -455,8 +460,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
455 } 460 }
456 free_page((unsigned long)kbuf); 461 free_page((unsigned long)kbuf);
457 } 462 }
458 *ppos = p; 463 *ppos = p;
459 return read; 464 return read ? read : err;
460} 465}
461 466
462 467
@@ -520,6 +525,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
520 ssize_t wrote = 0; 525 ssize_t wrote = 0;
521 ssize_t virtr = 0; 526 ssize_t virtr = 0;
522 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ 527 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
528 int err = 0;
523 529
524 if (p < (unsigned long) high_memory) { 530 if (p < (unsigned long) high_memory) {
525 unsigned long to_write = min_t(unsigned long, count, 531 unsigned long to_write = min_t(unsigned long, count,
@@ -540,14 +546,16 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
540 unsigned long sz = size_inside_page(p, count); 546 unsigned long sz = size_inside_page(p, count);
541 unsigned long n; 547 unsigned long n;
542 548
549 if (!is_vmalloc_or_module_addr((void *)p)) {
550 err = -ENXIO;
551 break;
552 }
543 n = copy_from_user(kbuf, buf, sz); 553 n = copy_from_user(kbuf, buf, sz);
544 if (n) { 554 if (n) {
545 if (wrote + virtr) 555 err = -EFAULT;
546 break; 556 break;
547 free_page((unsigned long)kbuf);
548 return -EFAULT;
549 } 557 }
550 sz = vwrite(kbuf, (char *)p, sz); 558 vwrite(kbuf, (char *)p, sz);
551 count -= sz; 559 count -= sz;
552 buf += sz; 560 buf += sz;
553 virtr += sz; 561 virtr += sz;
@@ -556,8 +564,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
556 free_page((unsigned long)kbuf); 564 free_page((unsigned long)kbuf);
557 } 565 }
558 566
559 *ppos = p; 567 *ppos = p;
560 return virtr + wrote; 568 return virtr + wrote ? : err;
561} 569}
562#endif 570#endif
563 571
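Both kmem hunks settle on one return convention: report the bytes already transferred if there are any, and only surface the error when nothing was copied. A small standalone sketch of that convention over an in-memory buffer (function name and fault point are illustrative):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Copy from src into dst but fail partway through, returning either the
 * bytes already copied or, if none were, the error code. */
static long copy_with_fault(char *dst, const char *src, size_t count,
			    size_t fault_at)
{
	size_t done = 0;
	int err = 0;

	while (done < count) {
		if (done == fault_at) {		/* simulated -EFAULT mid-copy */
			err = -EFAULT;
			break;
		}
		dst[done] = src[done];
		done++;
	}
	return done ? (long)done : err;		/* partial progress wins over err */
}

int main(void)
{
	char dst[8] = { 0 };

	printf("fault at 4 -> %ld\n", copy_with_fault(dst, "abcdefg", 7, 4));	/* 4 */
	printf("fault at 0 -> %ld\n", copy_with_fault(dst, "abcdefg", 7, 0));	/* -EFAULT */
	return 0;
}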
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 7d73cd430340..2ad7d37afbd0 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1651,10 +1651,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file)
1651 1651
1652 dc->open_ttys--; 1652 dc->open_ttys--;
1653 port->count--; 1653 port->count--;
1654 tty_port_tty_set(port, NULL);
1655 1654
1656 if (port->count == 0) { 1655 if (port->count == 0) {
1657 DBG1("close: %d", nport->token_dl); 1656 DBG1("close: %d", nport->token_dl);
1657 tty_port_tty_set(port, NULL);
1658 spin_lock_irqsave(&dc->spin_mutex, flags); 1658 spin_lock_irqsave(&dc->spin_mutex, flags);
1659 dc->last_ier &= ~(nport->token_dl); 1659 dc->last_ier &= ~(nport->token_dl);
1660 writew(dc->last_ier, dc->reg_ier); 1660 writew(dc->last_ier, dc->reg_ier);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8258982b49ec..2849713d2231 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1051,12 +1051,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1051 /* like a named pipe */ 1051 /* like a named pipe */
1052 } 1052 }
1053 1053
1054 /*
1055 * If we gave the user some bytes, update the access time.
1056 */
1057 if (count)
1058 file_accessed(file);
1059
1060 return (count ? count : retval); 1054 return (count ? count : retval);
1061} 1055}
1062 1056
@@ -1107,7 +1101,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
1107 size_t count, loff_t *ppos) 1101 size_t count, loff_t *ppos)
1108{ 1102{
1109 size_t ret; 1103 size_t ret;
1110 struct inode *inode = file->f_path.dentry->d_inode;
1111 1104
1112 ret = write_pool(&blocking_pool, buffer, count); 1105 ret = write_pool(&blocking_pool, buffer, count);
1113 if (ret) 1106 if (ret)
@@ -1116,8 +1109,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
1116 if (ret) 1109 if (ret)
1117 return ret; 1110 return ret;
1118 1111
1119 inode->i_mtime = current_fs_time(inode->i_sb);
1120 mark_inode_dirty(inode);
1121 return (ssize_t)count; 1112 return (ssize_t)count;
1122} 1113}
1123 1114
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 0798754a607c..bba727c3807e 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -50,7 +50,6 @@
50#include <linux/err.h> 50#include <linux/err.h>
51#include <linux/kfifo.h> 51#include <linux/kfifo.h>
52#include <linux/platform_device.h> 52#include <linux/platform_device.h>
53#include <linux/smp_lock.h>
54 53
55#include <asm/uaccess.h> 54#include <asm/uaccess.h>
56#include <asm/io.h> 55#include <asm/io.h>
@@ -905,14 +904,13 @@ static int sonypi_misc_release(struct inode *inode, struct file *file)
905 904
906static int sonypi_misc_open(struct inode *inode, struct file *file) 905static int sonypi_misc_open(struct inode *inode, struct file *file)
907{ 906{
908 lock_kernel();
909 mutex_lock(&sonypi_device.lock); 907 mutex_lock(&sonypi_device.lock);
910 /* Flush input queue on first open */ 908 /* Flush input queue on first open */
911 if (!sonypi_device.open_count) 909 if (!sonypi_device.open_count)
912 kfifo_reset(&sonypi_device.fifo); 910 kfifo_reset(&sonypi_device.fifo);
913 sonypi_device.open_count++; 911 sonypi_device.open_count++;
914 mutex_unlock(&sonypi_device.lock); 912 mutex_unlock(&sonypi_device.lock);
915 unlock_kernel(); 913
916 return 0; 914 return 0;
917} 915}
918 916
@@ -955,10 +953,10 @@ static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
955 return 0; 953 return 0;
956} 954}
957 955
958static int sonypi_misc_ioctl(struct inode *ip, struct file *fp, 956static long sonypi_misc_ioctl(struct file *fp,
959 unsigned int cmd, unsigned long arg) 957 unsigned int cmd, unsigned long arg)
960{ 958{
961 int ret = 0; 959 long ret = 0;
962 void __user *argp = (void __user *)arg; 960 void __user *argp = (void __user *)arg;
963 u8 val8; 961 u8 val8;
964 u16 val16; 962 u16 val16;
@@ -1074,7 +1072,8 @@ static const struct file_operations sonypi_misc_fops = {
1074 .open = sonypi_misc_open, 1072 .open = sonypi_misc_open,
1075 .release = sonypi_misc_release, 1073 .release = sonypi_misc_release,
1076 .fasync = sonypi_misc_fasync, 1074 .fasync = sonypi_misc_fasync,
1077 .ioctl = sonypi_misc_ioctl, 1075 .unlocked_ioctl = sonypi_misc_ioctl,
1076 .llseek = no_llseek,
1078}; 1077};
1079 1078
1080static struct miscdevice sonypi_misc_device = { 1079static struct miscdevice sonypi_misc_device = {
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 663cd15d7c78..f8bc79f6de34 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -68,7 +68,7 @@
68#include <linux/stat.h> 68#include <linux/stat.h>
69#include <linux/proc_fs.h> 69#include <linux/proc_fs.h>
70#include <linux/seq_file.h> 70#include <linux/seq_file.h>
71 71#include <linux/smp_lock.h>
72#include <linux/toshiba.h> 72#include <linux/toshiba.h>
73 73
74#define TOSH_MINOR_DEV 181 74#define TOSH_MINOR_DEV 181
@@ -88,13 +88,13 @@ static int tosh_date;
88static int tosh_sci; 88static int tosh_sci;
89static int tosh_fan; 89static int tosh_fan;
90 90
91static int tosh_ioctl(struct inode *, struct file *, unsigned int, 91static long tosh_ioctl(struct file *, unsigned int,
92 unsigned long); 92 unsigned long);
93 93
94 94
95static const struct file_operations tosh_fops = { 95static const struct file_operations tosh_fops = {
96 .owner = THIS_MODULE, 96 .owner = THIS_MODULE,
97 .ioctl = tosh_ioctl, 97 .unlocked_ioctl = tosh_ioctl,
98}; 98};
99 99
100static struct miscdevice tosh_device = { 100static struct miscdevice tosh_device = {
@@ -252,8 +252,7 @@ int tosh_smm(SMMRegisters *regs)
252EXPORT_SYMBOL(tosh_smm); 252EXPORT_SYMBOL(tosh_smm);
253 253
254 254
255static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, 255static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
256 unsigned long arg)
257{ 256{
258 SMMRegisters regs; 257 SMMRegisters regs;
259 SMMRegisters __user *argp = (SMMRegisters __user *)arg; 258 SMMRegisters __user *argp = (SMMRegisters __user *)arg;
@@ -275,13 +274,16 @@ static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
275 return -EINVAL; 274 return -EINVAL;
276 275
277 /* do we need to emulate the fan ? */ 276 /* do we need to emulate the fan ? */
277 lock_kernel();
278 if (tosh_fan==1) { 278 if (tosh_fan==1) {
279 if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) { 279 if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) {
280 err = tosh_emulate_fan(&regs); 280 err = tosh_emulate_fan(&regs);
281 unlock_kernel();
281 break; 282 break;
282 } 283 }
283 } 284 }
284 err = tosh_smm(&regs); 285 err = tosh_smm(&regs);
286 unlock_kernel();
285 break; 287 break;
286 default: 288 default:
287 return -EINVAL; 289 return -EINVAL;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
39struct tpm_inf_dev { 39struct tpm_inf_dev {
40 int iotype; 40 int iotype;
41 41
42 void __iomem *mem_base; /* MMIO ioremap'd addr */ 42 void __iomem *mem_base; /* MMIO ioremap'd addr */
43 unsigned long map_base; /* phys MMIO base */ 43 unsigned long map_base; /* phys MMIO base */
44 unsigned long map_size; /* MMIO region size */ 44 unsigned long map_size; /* MMIO region size */
45 unsigned int index_off; /* index register offset */ 45 unsigned int index_off; /* index register offset */
46 46
47 unsigned int data_regs; /* Data registers */ 47 unsigned int data_regs; /* Data registers */
48 unsigned int data_size; 48 unsigned int data_size;
49 49
50 unsigned int config_port; /* IO Port config index reg */ 50 unsigned int config_port; /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
406 .miscdev = {.fops = &inf_ops,}, 406 .miscdev = {.fops = &inf_ops,},
407}; 407};
408 408
409static const struct pnp_device_id tpm_pnp_tbl[] = { 409static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
410 /* Infineon TPMs */ 410 /* Infineon TPMs */
411 {"IFX0101", 0}, 411 {"IFX0101", 0},
412 {"IFX0102", 0}, 412 {"IFX0102", 0},
413 {"", 0} 413 {"", 0}
414}; 414};
415 415
416MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); 416MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
417 417
418static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, 418static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
419 const struct pnp_device_id *dev_id) 419 const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
430 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && 430 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
431 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { 431 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
432 432
433 tpm_dev.iotype = TPM_INF_IO_PORT; 433 tpm_dev.iotype = TPM_INF_IO_PORT;
434 434
435 tpm_dev.config_port = pnp_port_start(dev, 0); 435 tpm_dev.config_port = pnp_port_start(dev, 0);
436 tpm_dev.config_size = pnp_port_len(dev, 0); 436 tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
459 goto err_last; 459 goto err_last;
460 } 460 }
461 } else if (pnp_mem_valid(dev, 0) && 461 } else if (pnp_mem_valid(dev, 0) &&
462 !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { 462 !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
463 463
464 tpm_dev.iotype = TPM_INF_IO_MEM; 464 tpm_dev.iotype = TPM_INF_IO_MEM;
465 465
466 tpm_dev.map_base = pnp_mem_start(dev, 0); 466 tpm_dev.map_base = pnp_mem_start(dev, 0);
467 tpm_dev.map_size = pnp_mem_len(dev, 0); 467 tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
563 "product id 0x%02x%02x" 563 "product id 0x%02x%02x"
564 "%s\n", 564 "%s\n",
565 tpm_dev.iotype == TPM_INF_IO_PORT ? 565 tpm_dev.iotype == TPM_INF_IO_PORT ?
566 tpm_dev.config_port : 566 tpm_dev.config_port :
567 tpm_dev.map_base + tpm_dev.index_off, 567 tpm_dev.map_base + tpm_dev.index_off,
568 tpm_dev.iotype == TPM_INF_IO_PORT ? 568 tpm_dev.iotype == TPM_INF_IO_PORT ?
569 tpm_dev.data_regs : 569 tpm_dev.data_regs :
570 tpm_dev.map_base + tpm_dev.data_regs, 570 tpm_dev.map_base + tpm_dev.data_regs,
571 version[0], version[1], 571 version[0], version[1],
572 vendorid[0], vendorid[1], 572 vendorid[0], vendorid[1],
573 productid[0], productid[1], chipname); 573 productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
607 iounmap(tpm_dev.mem_base); 607 iounmap(tpm_dev.mem_base);
608 release_mem_region(tpm_dev.map_base, tpm_dev.map_size); 608 release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
609 } 609 }
610 tpm_dev_vendor_release(chip);
610 tpm_remove_hardware(chip->dev); 611 tpm_remove_hardware(chip->dev);
611 } 612 }
612} 613}
613 614
615static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
616{
617 struct tpm_chip *chip = pnp_get_drvdata(dev);
618 int rc;
619 if (chip) {
620 u8 savestate[] = {
621 0, 193, /* TPM_TAG_RQU_COMMAND */
622 0, 0, 0, 10, /* blob length (in bytes) */
623 0, 0, 0, 152 /* TPM_ORD_SaveState */
624 };
625 dev_info(&dev->dev, "saving TPM state\n");
626 rc = tpm_inf_send(chip, savestate, sizeof(savestate));
627 if (rc < 0) {
628 dev_err(&dev->dev, "error while saving TPM state\n");
629 return rc;
630 }
631 }
632 return 0;
633}
634
635static int tpm_inf_pnp_resume(struct pnp_dev *dev)
636{
637 /* Re-configure TPM after suspending */
638 tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
639 tpm_config_out(IOLIMH, TPM_INF_ADDR);
640 tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
641 tpm_config_out(IOLIML, TPM_INF_ADDR);
642 tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
643 /* activate register */
644 tpm_config_out(TPM_DAR, TPM_INF_ADDR);
645 tpm_config_out(0x01, TPM_INF_DATA);
646 tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
647 /* disable RESET, LP and IRQC */
648 tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
649 return tpm_pm_resume(&dev->dev);
650}
651
614static struct pnp_driver tpm_inf_pnp_driver = { 652static struct pnp_driver tpm_inf_pnp_driver = {
615 .name = "tpm_inf_pnp", 653 .name = "tpm_inf_pnp",
616 .driver = { 654 .id_table = tpm_inf_pnp_tbl,
617 .owner = THIS_MODULE,
618 .suspend = tpm_pm_suspend,
619 .resume = tpm_pm_resume,
620 },
621 .id_table = tpm_pnp_tbl,
622 .probe = tpm_inf_pnp_probe, 655 .probe = tpm_inf_pnp_probe,
623 .remove = __devexit_p(tpm_inf_pnp_remove), 656 .suspend = tpm_inf_pnp_suspend,
657 .resume = tpm_inf_pnp_resume,
658 .remove = __devexit_p(tpm_inf_pnp_remove)
624}; 659};
625 660
626static int __init init_inf(void) 661static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
638 673
639MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); 674MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
640MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); 675MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
641MODULE_VERSION("1.9"); 676MODULE_VERSION("1.9.2");
642MODULE_LICENSE("GPL"); 677MODULE_LICENSE("GPL");
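The new suspend hook sends the chip a raw TPM 1.2 command: tag TPM_TAG_RQU_COMMAND (0x00C1), a 10-byte total length, and ordinal TPM_ORD_SaveState (152), all big-endian. A standalone sketch that rebuilds the blob field by field and checks it against the literal bytes in the patch:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void put_be16(uint8_t *p, uint16_t v) { p[0] = v >> 8; p[1] = v & 0xff; }
static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = (v >> 16) & 0xff; p[2] = (v >> 8) & 0xff; p[3] = v & 0xff;
}

int main(void)
{
	/* Literal bytes from tpm_inf_pnp_suspend() above. */
	static const uint8_t savestate[] = { 0, 193, 0, 0, 0, 10, 0, 0, 0, 152 };
	uint8_t cmd[10];

	put_be16(cmd + 0, 0x00C1);	/* TPM_TAG_RQU_COMMAND */
	put_be32(cmd + 2, sizeof(cmd));	/* paramSize: whole blob, 10 bytes */
	put_be32(cmd + 6, 152);		/* TPM_ORD_SaveState (0x98) */

	printf("blob %s the patch\n",
	       memcmp(cmd, savestate, sizeof(cmd)) ? "differs from" : "matches");
	return 0;
}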
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index f15df40bc318..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
1951 pid = task_pid(current); 1951 pid = task_pid(current);
1952 type = PIDTYPE_PID; 1952 type = PIDTYPE_PID;
1953 } 1953 }
1954 get_pid(pid);
1954 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 1955 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1955 retval = __f_setown(filp, pid, type, 0); 1956 retval = __f_setown(filp, pid, type, 0);
1957 put_pid(pid);
1956 if (retval) 1958 if (retval)
1957 goto out; 1959 goto out;
1958 } else { 1960 } else {
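The tty_fasync hunk pins the struct pid with get_pid() before ctrl_lock is dropped and releases it after __f_setown(), so the pid cannot be freed while it is used unlocked. A user-space sketch of that take-a-reference-then-drop-the-lock ordering; the refcount and names here are simplified stand-ins, not the kernel pid API:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct pid { int count; int nr; };

static struct pid *get_pid(struct pid *p) { p->count++; return p; }
static void put_pid(struct pid *p)
{
	if (--p->count == 0) {
		printf("pid %d freed\n", p->nr);
		free(p);
	}
}

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pid *session_pid;			/* protected by ctrl_lock */

static void use_pid_outside_lock(void)
{
	struct pid *pid;

	pthread_mutex_lock(&ctrl_lock);
	pid = get_pid(session_pid);		/* pin before dropping the lock */
	pthread_mutex_unlock(&ctrl_lock);

	printf("working with pid %d unlocked\n", pid->nr);
	put_pid(pid);				/* balance the get above */
}

int main(void)
{
	session_pid = malloc(sizeof(*session_pid));
	if (!session_pid)
		return 1;
	session_pid->count = 1;
	session_pid->nr = 42;

	use_pid_outside_lock();
	put_pid(session_pid);			/* drop the original reference */
	return 0;
}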
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
index 867b67be9f0a..c7072ba14f48 100644
--- a/drivers/char/uv_mmtimer.c
+++ b/drivers/char/uv_mmtimer.c
@@ -89,13 +89,17 @@ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
89 switch (cmd) { 89 switch (cmd) {
90 case MMTIMER_GETOFFSET: /* offset of the counter */ 90 case MMTIMER_GETOFFSET: /* offset of the counter */
91 /* 91 /*
92 * UV RTC register is on its own page 92 * Starting with HUB rev 2.0, the UV RTC register is
 93 * replicated across all cachelines of its own page.
94 * This allows faster simultaneous reads from a given socket.
95 *
96 * The offset returned is in 64 bit units.
93 */ 97 */
94 if (PAGE_SIZE <= (1 << 16)) 98 if (uv_get_min_hub_revision_id() == 1)
95 ret = ((UV_LOCAL_MMR_BASE | UVH_RTC) & (PAGE_SIZE-1)) 99 ret = 0;
96 / 8;
97 else 100 else
98 ret = -ENOSYS; 101 ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) %
102 PAGE_SIZE) / 8;
99 break; 103 break;
100 104
101 case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ 105 case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
@@ -115,8 +119,8 @@ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
115 ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK); 119 ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK);
116 break; 120 break;
117 121
118 case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */ 122 case MMTIMER_MMAPAVAIL:
119 ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0; 123 ret = 1;
120 break; 124 break;
121 125
122 case MMTIMER_GETCOUNTER: 126 case MMTIMER_GETCOUNTER:
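With the RTC register replicated per cache line, MMTIMER_GETOFFSET now returns a per-CPU slot within the page, counted in 64-bit words. A sketch of the arithmetic using typical x86 constants (the 64-byte cache line and 4 KiB page are assumptions here; the real values come from the UV headers):

#include <stdio.h>

#define L1_CACHE_BYTES	64	/* assumed cache line size */
#define PAGE_SIZE	4096	/* assumed 4 KiB page */

/* Offset of this CPU's copy of the RTC register, in 64-bit units,
 * mirroring ((uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE) / 8 */
static long rtc_offset(int blade_cpu)
{
	return ((long)blade_cpu * L1_CACHE_BYTES % PAGE_SIZE) / 8;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu %d -> offset %ld words\n", cpu, rtc_offset(cpu));
	/* cpu 0 -> 0, cpu 1 -> 8, cpu 2 -> 16, cpu 3 -> 24 */
	printf("cpu 64 wraps to %ld words\n", rtc_offset(64));	/* back to 0 */
	return 0;
}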
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f06024668f99..537c29ac4487 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -36,17 +36,6 @@ MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 36MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
37MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); 37MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
38 38
39static u32 cn_idx = CN_IDX_CONNECTOR;
40static u32 cn_val = CN_VAL_CONNECTOR;
41
42module_param(cn_idx, uint, 0);
43module_param(cn_val, uint, 0);
44MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
45MODULE_PARM_DESC(cn_val, "Connector's main device val.");
46
47static DEFINE_MUTEX(notify_lock);
48static LIST_HEAD(notify_list);
49
50static struct cn_dev cdev; 39static struct cn_dev cdev;
51 40
52static int cn_already_initialized; 41static int cn_already_initialized;
@@ -210,54 +199,6 @@ static void cn_rx_skb(struct sk_buff *__skb)
210} 199}
211 200
212/* 201/*
213 * Notification routing.
214 *
215 * Gets id and checks if there are notification request for it's idx
216 * and val. If there are such requests notify the listeners with the
217 * given notify event.
218 *
219 */
220static void cn_notify(struct cb_id *id, u32 notify_event)
221{
222 struct cn_ctl_entry *ent;
223
224 mutex_lock(&notify_lock);
225 list_for_each_entry(ent, &notify_list, notify_entry) {
226 int i;
227 struct cn_notify_req *req;
228 struct cn_ctl_msg *ctl = ent->msg;
229 int idx_found, val_found;
230
231 idx_found = val_found = 0;
232
233 req = (struct cn_notify_req *)ctl->data;
234 for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
235 if (id->idx >= req->first &&
236 id->idx < req->first + req->range) {
237 idx_found = 1;
238 break;
239 }
240 }
241
242 for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
243 if (id->val >= req->first &&
244 id->val < req->first + req->range) {
245 val_found = 1;
246 break;
247 }
248 }
249
250 if (idx_found && val_found) {
251 struct cn_msg m = { .ack = notify_event, };
252
253 memcpy(&m.id, id, sizeof(m.id));
254 cn_netlink_send(&m, ctl->group, GFP_KERNEL);
255 }
256 }
257 mutex_unlock(&notify_lock);
258}
259
260/*
261 * Callback add routing - adds callback with given ID and name. 202 * Callback add routing - adds callback with given ID and name.
262 * If there is registered callback with the same ID it will not be added. 203 * If there is registered callback with the same ID it will not be added.
263 * 204 *
@@ -276,8 +217,6 @@ int cn_add_callback(struct cb_id *id, char *name,
276 if (err) 217 if (err)
277 return err; 218 return err;
278 219
279 cn_notify(id, 0);
280
281 return 0; 220 return 0;
282} 221}
283EXPORT_SYMBOL_GPL(cn_add_callback); 222EXPORT_SYMBOL_GPL(cn_add_callback);
@@ -295,111 +234,9 @@ void cn_del_callback(struct cb_id *id)
295 struct cn_dev *dev = &cdev; 234 struct cn_dev *dev = &cdev;
296 235
297 cn_queue_del_callback(dev->cbdev, id); 236 cn_queue_del_callback(dev->cbdev, id);
298 cn_notify(id, 1);
299} 237}
300EXPORT_SYMBOL_GPL(cn_del_callback); 238EXPORT_SYMBOL_GPL(cn_del_callback);
301 239
302/*
303 * Checks two connector's control messages to be the same.
304 * Returns 1 if they are the same or if the first one is corrupted.
305 */
306static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
307{
308 int i;
309 struct cn_notify_req *req1, *req2;
310
311 if (m1->idx_notify_num != m2->idx_notify_num)
312 return 0;
313
314 if (m1->val_notify_num != m2->val_notify_num)
315 return 0;
316
317 if (m1->len != m2->len)
318 return 0;
319
320 if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
321 m1->len)
322 return 1;
323
324 req1 = (struct cn_notify_req *)m1->data;
325 req2 = (struct cn_notify_req *)m2->data;
326
327 for (i = 0; i < m1->idx_notify_num; ++i) {
328 if (req1->first != req2->first || req1->range != req2->range)
329 return 0;
330 req1++;
331 req2++;
332 }
333
334 for (i = 0; i < m1->val_notify_num; ++i) {
335 if (req1->first != req2->first || req1->range != req2->range)
336 return 0;
337 req1++;
338 req2++;
339 }
340
341 return 1;
342}
343
344/*
345 * Main connector device's callback.
346 *
347 * Used for notification of a request's processing.
348 */
349static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
350{
351 struct cn_ctl_msg *ctl;
352 struct cn_ctl_entry *ent;
353 u32 size;
354
355 if (msg->len < sizeof(*ctl))
356 return;
357
358 ctl = (struct cn_ctl_msg *)msg->data;
359
360 size = (sizeof(*ctl) + ((ctl->idx_notify_num +
361 ctl->val_notify_num) *
362 sizeof(struct cn_notify_req)));
363
364 if (msg->len != size)
365 return;
366
367 if (ctl->len + sizeof(*ctl) != msg->len)
368 return;
369
370 /*
371 * Remove notification.
372 */
373 if (ctl->group == 0) {
374 struct cn_ctl_entry *n;
375
376 mutex_lock(&notify_lock);
377 list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
378 if (cn_ctl_msg_equals(ent->msg, ctl)) {
379 list_del(&ent->notify_entry);
380 kfree(ent);
381 }
382 }
383 mutex_unlock(&notify_lock);
384
385 return;
386 }
387
388 size += sizeof(*ent);
389
390 ent = kzalloc(size, GFP_KERNEL);
391 if (!ent)
392 return;
393
394 ent->msg = (struct cn_ctl_msg *)(ent + 1);
395
396 memcpy(ent->msg, ctl, size - sizeof(*ent));
397
398 mutex_lock(&notify_lock);
399 list_add(&ent->notify_entry, &notify_list);
400 mutex_unlock(&notify_lock);
401}
402
403static int cn_proc_show(struct seq_file *m, void *v) 240static int cn_proc_show(struct seq_file *m, void *v)
404{ 241{
405 struct cn_queue_dev *dev = cdev.cbdev; 242 struct cn_queue_dev *dev = cdev.cbdev;
@@ -437,11 +274,8 @@ static const struct file_operations cn_file_ops = {
437static int __devinit cn_init(void) 274static int __devinit cn_init(void)
438{ 275{
439 struct cn_dev *dev = &cdev; 276 struct cn_dev *dev = &cdev;
440 int err;
441 277
442 dev->input = cn_rx_skb; 278 dev->input = cn_rx_skb;
443 dev->id.idx = cn_idx;
444 dev->id.val = cn_val;
445 279
446 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, 280 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
447 CN_NETLINK_USERS + 0xf, 281 CN_NETLINK_USERS + 0xf,
@@ -457,14 +291,6 @@ static int __devinit cn_init(void)
457 291
458 cn_already_initialized = 1; 292 cn_already_initialized = 1;
459 293
460 err = cn_add_callback(&dev->id, "connector", &cn_callback);
461 if (err) {
462 cn_already_initialized = 0;
463 cn_queue_free_dev(dev->cbdev);
464 netlink_kernel_release(dev->nls);
465 return -EINVAL;
466 }
467
468 proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops); 294 proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
469 295
470 return 0; 296 return 0;
@@ -478,7 +304,6 @@ static void __devexit cn_fini(void)
478 304
479 proc_net_remove(&init_net, "connector"); 305 proc_net_remove(&init_net, "connector");
480 306
481 cn_del_callback(&dev->id);
482 cn_queue_free_dev(dev->cbdev); 307 cn_queue_free_dev(dev->cbdev);
483 netlink_kernel_release(dev->nls); 308 netlink_kernel_release(dev->nls);
484} 309}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
554 (dbs_tuners_ins.up_threshold - 554 (dbs_tuners_ins.up_threshold -
555 dbs_tuners_ins.down_differential); 555 dbs_tuners_ins.down_differential);
556 556
557 if (freq_next < policy->min)
558 freq_next = policy->min;
559
557 if (!dbs_tuners_ins.powersave_bias) { 560 if (!dbs_tuners_ins.powersave_bias) {
558 __cpufreq_driver_target(policy, freq_next, 561 __cpufreq_driver_target(policy, freq_next,
559 CPUFREQ_RELATION_L); 562 CPUFREQ_RELATION_L);
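The ondemand hunk clamps the scaled-down estimate to policy->min before handing it to the driver; with CPUFREQ_RELATION_L the driver then picks the lowest table frequency at or above that target. A standalone sketch with a made-up frequency table:

#include <stdio.h>

/* Pick the lowest table frequency >= target (CPUFREQ_RELATION_L) after
 * clamping the estimate into the policy limits.  Table sorted ascending. */
static unsigned int pick_freq(const unsigned int *table, int n,
			      unsigned int target,
			      unsigned int pmin, unsigned int pmax)
{
	if (target < pmin)
		target = pmin;		/* the clamp added by this hunk */
	if (target > pmax)
		target = pmax;

	for (int i = 0; i < n; i++)
		if (table[i] >= target)
			return table[i];
	return table[n - 1];
}

int main(void)
{
	unsigned int khz[] = { 800000, 1200000, 1600000, 2000000 };

	/* An estimate below policy->min no longer requests an out-of-range rate. */
	printf("%u kHz\n", pick_freq(khz, 4,  600000, 800000, 2000000));	/*  800000 */
	printf("%u kHz\n", pick_freq(khz, 4, 1300000, 800000, 2000000));	/* 1600000 */
	return 0;
}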
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 68104434ebb5..73655aeb3a60 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -18,6 +18,7 @@
18#include <linux/hrtimer.h> 18#include <linux/hrtimer.h>
19#include <linux/tick.h> 19#include <linux/tick.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/math64.h>
21 22
22#define BUCKETS 12 23#define BUCKETS 12
23#define RESOLUTION 1024 24#define RESOLUTION 1024
@@ -169,6 +170,12 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
169 170
170static void menu_update(struct cpuidle_device *dev); 171static void menu_update(struct cpuidle_device *dev);
171 172
173/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
174static u64 div_round64(u64 dividend, u32 divisor)
175{
176 return div_u64(dividend + (divisor / 2), divisor);
177}
178
172/** 179/**
173 * menu_select - selects the next idle state to enter 180 * menu_select - selects the next idle state to enter
174 * @dev: the CPU 181 * @dev: the CPU
@@ -209,9 +216,8 @@ static int menu_select(struct cpuidle_device *dev)
209 data->correction_factor[data->bucket] = RESOLUTION * DECAY; 216 data->correction_factor[data->bucket] = RESOLUTION * DECAY;
210 217
211 /* Make sure to round up for half microseconds */ 218 /* Make sure to round up for half microseconds */
212 data->predicted_us = DIV_ROUND_CLOSEST( 219 data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
213 data->expected_us * data->correction_factor[data->bucket], 220 RESOLUTION * DECAY);
214 RESOLUTION * DECAY);
215 221
216 /* 222 /*
217 * We want to default to C1 (hlt), not to busy polling 223 * We want to default to C1 (hlt), not to busy polling
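div_round64() reproduces DIV_ROUND_CLOSEST for a 64-bit dividend while keeping the divisor 32-bit, which div_u64() handles without a full 64-by-64 division. A quick standalone check of the rounding (the correction and decay values below are illustrative, not taken from menu.c):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Round-to-nearest division: add half the divisor before truncating,
 * the same trick as the menu governor's div_round64(). */
static uint64_t div_round64(uint64_t dividend, uint32_t divisor)
{
	return (dividend + divisor / 2) / divisor;
}

int main(void)
{
	/* predicted_us = expected_us * correction / (RESOLUTION * DECAY) */
	const uint32_t resolution = 1024, decay = 8;	/* decay value illustrative */
	uint64_t expected_us = 500, correction = 6000;

	printf("%" PRIu64 " us\n",
	       div_round64(expected_us * correction, resolution * decay));	/* 366 */
	printf("7/2 rounds to %" PRIu64 "\n", div_round64(7, 2));		/* 4 */
	return 0;
}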
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 0af80577dc7b..d3a27e0119bc 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -57,6 +57,23 @@ static int padlock_sha_update(struct shash_desc *desc,
57 return crypto_shash_update(&dctx->fallback, data, length); 57 return crypto_shash_update(&dctx->fallback, data, length);
58} 58}
59 59
60static int padlock_sha_export(struct shash_desc *desc, void *out)
61{
62 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
63
64 return crypto_shash_export(&dctx->fallback, out);
65}
66
67static int padlock_sha_import(struct shash_desc *desc, const void *in)
68{
69 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
70 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
71
72 dctx->fallback.tfm = ctx->fallback;
73 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
74 return crypto_shash_import(&dctx->fallback, in);
75}
76
60static inline void padlock_output_block(uint32_t *src, 77static inline void padlock_output_block(uint32_t *src,
61 uint32_t *dst, size_t count) 78 uint32_t *dst, size_t count)
62{ 79{
@@ -235,7 +252,10 @@ static struct shash_alg sha1_alg = {
235 .update = padlock_sha_update, 252 .update = padlock_sha_update,
236 .finup = padlock_sha1_finup, 253 .finup = padlock_sha1_finup,
237 .final = padlock_sha1_final, 254 .final = padlock_sha1_final,
255 .export = padlock_sha_export,
256 .import = padlock_sha_import,
238 .descsize = sizeof(struct padlock_sha_desc), 257 .descsize = sizeof(struct padlock_sha_desc),
258 .statesize = sizeof(struct sha1_state),
239 .base = { 259 .base = {
240 .cra_name = "sha1", 260 .cra_name = "sha1",
241 .cra_driver_name = "sha1-padlock", 261 .cra_driver_name = "sha1-padlock",
@@ -256,7 +276,10 @@ static struct shash_alg sha256_alg = {
256 .update = padlock_sha_update, 276 .update = padlock_sha_update,
257 .finup = padlock_sha256_finup, 277 .finup = padlock_sha256_finup,
258 .final = padlock_sha256_final, 278 .final = padlock_sha256_final,
279 .export = padlock_sha_export,
280 .import = padlock_sha_import,
259 .descsize = sizeof(struct padlock_sha_desc), 281 .descsize = sizeof(struct padlock_sha_desc),
282 .statesize = sizeof(struct sha256_state),
260 .base = { 283 .base = {
261 .cra_name = "sha256", 284 .cra_name = "sha256",
262 .cra_driver_name = "sha256-padlock", 285 .cra_driver_name = "sha256-padlock",
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f15112569c1d..efc1a61ca231 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
815 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", 815 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
816 cookie, done ? *done : 0, used ? *used : 0); 816 cookie, done ? *done : 0, used ? *used : 0);
817 817
818 spin_lock_bh(atchan->lock); 818 spin_lock_bh(&atchan->lock);
819 819
820 last_complete = atchan->completed_cookie; 820 last_complete = atchan->completed_cookie;
821 last_used = chan->cookie; 821 last_used = chan->cookie;
@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
830 ret = dma_async_is_complete(cookie, last_complete, last_used); 830 ret = dma_async_is_complete(cookie, last_complete, last_used);
831 } 831 }
832 832
833 spin_unlock_bh(atchan->lock); 833 spin_unlock_bh(&atchan->lock);
834 834
835 if (done) 835 if (done)
836 *done = last_complete; 836 *done = last_complete;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4a99cd94536b..64a937262a40 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
613 cohd_fin->pending_irqs--; 613 cohd_fin->pending_irqs--;
614 cohc->completed = cohd_fin->desc.cookie; 614 cohc->completed = cohd_fin->desc.cookie;
615 615
616 BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
617
618 if (cohc->nbr_active_done == 0) 616 if (cohc->nbr_active_done == 0)
619 return; 617 return;
620 618
@@ -1294,8 +1292,8 @@ static int __exit coh901318_remove(struct platform_device *pdev)
1294 dma_async_device_unregister(&base->dma_slave); 1292 dma_async_device_unregister(&base->dma_slave);
1295 coh901318_pool_destroy(&base->pool); 1293 coh901318_pool_destroy(&base->pool);
1296 free_irq(platform_get_irq(pdev, 0), base); 1294 free_irq(platform_get_irq(pdev, 0), base);
1297 kfree(base);
1298 iounmap(base->virtbase); 1295 iounmap(base->virtbase);
1296 kfree(base);
1299 release_mem_region(pdev->resource->start, 1297 release_mem_region(pdev->resource->start,
1300 resource_size(pdev->resource)); 1298 resource_size(pdev->resource));
1301 return 0; 1299 return 0;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8bb..e7a3230fb7d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
826 chan->dev->chan = NULL; 826 chan->dev->chan = NULL;
827 mutex_unlock(&dma_list_mutex); 827 mutex_unlock(&dma_list_mutex);
828 device_unregister(&chan->dev->device); 828 device_unregister(&chan->dev->device);
829 free_percpu(chan->local);
829 } 830 }
830} 831}
831EXPORT_SYMBOL(dma_async_device_unregister); 832EXPORT_SYMBOL(dma_async_device_unregister);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8b905161fbf4..948d563941c9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:
467 467
468 if (iterations > 0) 468 if (iterations > 0)
469 while (!kthread_should_stop()) { 469 while (!kthread_should_stop()) {
470 DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); 470 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
471 interruptible_sleep_on(&wait_dmatest_exit); 471 interruptible_sleep_on(&wait_dmatest_exit);
472 } 472 }
473 473
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 285bed0fe17b..d28369f7afd2 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1270,8 +1270,6 @@ static int __init dw_probe(struct platform_device *pdev)
1270 goto err_kfree; 1270 goto err_kfree;
1271 } 1271 }
1272 1272
1273 memset(dw, 0, sizeof *dw);
1274
1275 dw->regs = ioremap(io->start, DW_REGLEN); 1273 dw->regs = ioremap(io->start, DW_REGLEN);
1276 if (!dw->regs) { 1274 if (!dw->regs) {
1277 err = -ENOMEM; 1275 err = -ENOMEM;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36d3c2e..dcc4ab78b32b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
1032 dma->dev = &pdev->dev; 1032 dma->dev = &pdev->dev;
1033 1033
1034 if (!dma->chancnt) { 1034 if (!dma->chancnt) {
1035 dev_err(dev, "zero channels detected\n"); 1035 dev_err(dev, "channel enumeration error\n");
1036 goto err_setup_interrupts; 1036 goto err_setup_interrupts;
1037 } 1037 }
1038 1038
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 45edde996480..bbc3e78ef333 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,7 @@
60 * @dca: direct cache access context 60 * @dca: direct cache access context
61 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) 61 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
62 * @enumerate_channels: hw version specific channel enumeration 62 * @enumerate_channels: hw version specific channel enumeration
63 * @reset_hw: hw version specific channel (re)initialization
63 * @cleanup_tasklet: select between the v2 and v3 cleanup routines 64 * @cleanup_tasklet: select between the v2 and v3 cleanup routines
64 * @timer_fn: select between the v2 and v3 timer watchdog routines 65 * @timer_fn: select between the v2 and v3 timer watchdog routines
65 * @self_test: hardware version specific self test for each supported op type 66 * @self_test: hardware version specific self test for each supported op type
@@ -78,6 +79,7 @@ struct ioatdma_device {
78 struct dca_provider *dca; 79 struct dca_provider *dca;
79 void (*intr_quirk)(struct ioatdma_device *device); 80 void (*intr_quirk)(struct ioatdma_device *device);
80 int (*enumerate_channels)(struct ioatdma_device *device); 81 int (*enumerate_channels)(struct ioatdma_device *device);
82 int (*reset_hw)(struct ioat_chan_common *chan);
81 void (*cleanup_tasklet)(unsigned long data); 83 void (*cleanup_tasklet)(unsigned long data);
82 void (*timer_fn)(unsigned long data); 84 void (*timer_fn)(unsigned long data);
83 int (*self_test)(struct ioatdma_device *device); 85 int (*self_test)(struct ioatdma_device *device);
@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
264 writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); 266 writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
265} 267}
266 268
269static inline void ioat_reset(struct ioat_chan_common *chan)
270{
271 u8 ver = chan->device->version;
272
273 writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
274}
275
276static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
277{
278 u8 ver = chan->device->version;
279 u8 cmd;
280
281 cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
282 return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
283}
284
267static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) 285static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
268{ 286{
269 struct ioat_chan_common *chan = &ioat->base; 287 struct ioat_chan_common *chan = &ioat->base;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8f1f7f05deaa..5cc37afe2bc1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
239 __ioat2_start_null_desc(ioat); 239 __ioat2_start_null_desc(ioat);
240} 240}
241 241
242static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) 242int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
243{ 243{
244 struct ioat_chan_common *chan = &ioat->base; 244 unsigned long end = jiffies + tmo;
245 unsigned long phys_complete; 245 int err = 0;
246 u32 status; 246 u32 status;
247 247
248 status = ioat_chansts(chan); 248 status = ioat_chansts(chan);
249 if (is_ioat_active(status) || is_ioat_idle(status)) 249 if (is_ioat_active(status) || is_ioat_idle(status))
250 ioat_suspend(chan); 250 ioat_suspend(chan);
251 while (is_ioat_active(status) || is_ioat_idle(status)) { 251 while (is_ioat_active(status) || is_ioat_idle(status)) {
252 if (tmo && time_after(jiffies, end)) {
253 err = -ETIMEDOUT;
254 break;
255 }
252 status = ioat_chansts(chan); 256 status = ioat_chansts(chan);
253 cpu_relax(); 257 cpu_relax();
254 } 258 }
255 259
260 return err;
261}
262
263int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
264{
265 unsigned long end = jiffies + tmo;
266 int err = 0;
267
268 ioat_reset(chan);
269 while (ioat_reset_pending(chan)) {
270 if (end && time_after(jiffies, end)) {
271 err = -ETIMEDOUT;
272 break;
273 }
274 cpu_relax();
275 }
276
277 return err;
278}
279
280static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
281{
282 struct ioat_chan_common *chan = &ioat->base;
283 unsigned long phys_complete;
284
285 ioat2_quiesce(chan, 0);
256 if (ioat_cleanup_preamble(chan, &phys_complete)) 286 if (ioat_cleanup_preamble(chan, &phys_complete))
257 __cleanup(ioat, phys_complete); 287 __cleanup(ioat, phys_complete);
258 288
@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
318 spin_unlock_bh(&chan->cleanup_lock); 348 spin_unlock_bh(&chan->cleanup_lock);
319} 349}
320 350
351static int ioat2_reset_hw(struct ioat_chan_common *chan)
352{
353 /* throw away whatever the channel was doing and get it initialized */
354 u32 chanerr;
355
356 ioat2_quiesce(chan, msecs_to_jiffies(100));
357
358 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
359 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
360
361 return ioat2_reset_sync(chan, msecs_to_jiffies(200));
362}
363
321/** 364/**
322 * ioat2_enumerate_channels - find and initialize the device's channels 365 * ioat2_enumerate_channels - find and initialize the device's channels
323 * @device: the device to be enumerated 366 * @device: the device to be enumerated
@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
360 (unsigned long) ioat); 403 (unsigned long) ioat);
361 ioat->xfercap_log = xfercap_log; 404 ioat->xfercap_log = xfercap_log;
362 spin_lock_init(&ioat->ring_lock); 405 spin_lock_init(&ioat->ring_lock);
406 if (device->reset_hw(&ioat->base)) {
407 i = 0;
408 break;
409 }
363 } 410 }
364 dma->chancnt = i; 411 dma->chancnt = i;
365 return i; 412 return i;
@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
467 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 514 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
468 struct ioat_chan_common *chan = &ioat->base; 515 struct ioat_chan_common *chan = &ioat->base;
469 struct ioat_ring_ent **ring; 516 struct ioat_ring_ent **ring;
470 u32 chanerr;
471 int order; 517 int order;
472 518
473 /* have we already been set up? */ 519 /* have we already been set up? */
@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
477 /* Setup register to interrupt and write completion status on error */ 523 /* Setup register to interrupt and write completion status on error */
478 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); 524 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
479 525
480 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
481 if (chanerr) {
482 dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
483 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
484 }
485
486 /* allocate a completion writeback area */ 526 /* allocate a completion writeback area */
487 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ 527 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
488 chan->completion = pci_pool_alloc(chan->device->completion_pool, 528 chan->completion = pci_pool_alloc(chan->device->completion_pool,
@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
746 tasklet_disable(&chan->cleanup_task); 786 tasklet_disable(&chan->cleanup_task);
747 del_timer_sync(&chan->timer); 787 del_timer_sync(&chan->timer);
748 device->cleanup_tasklet((unsigned long) ioat); 788 device->cleanup_tasklet((unsigned long) ioat);
749 789 device->reset_hw(chan);
750 /* Delay 100ms after reset to allow internal DMA logic to quiesce
751 * before removing DMA descriptor resources.
752 */
753 writeb(IOAT_CHANCMD_RESET,
754 chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
755 mdelay(100);
756 790
757 spin_lock_bh(&ioat->ring_lock); 791 spin_lock_bh(&ioat->ring_lock);
758 descs = ioat2_ring_space(ioat); 792 descs = ioat2_ring_space(ioat);
@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
839 int err; 873 int err;
840 874
841 device->enumerate_channels = ioat2_enumerate_channels; 875 device->enumerate_channels = ioat2_enumerate_channels;
876 device->reset_hw = ioat2_reset_hw;
842 device->cleanup_tasklet = ioat2_cleanup_tasklet; 877 device->cleanup_tasklet = ioat2_cleanup_tasklet;
843 device->timer_fn = ioat2_timer_event; 878 device->timer_fn = ioat2_timer_event;
844 device->self_test = ioat_dma_self_test; 879 device->self_test = ioat_dma_self_test;
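ioat2_quiesce() and ioat2_reset_sync() share one shape: start the operation, then poll hardware status until it clears or a deadline passes, with a zero timeout meaning wait forever. A user-space sketch of that bounded poll, with a monotonic-clock deadline standing in for jiffies and a counter standing in for the channel status:

#include <stdio.h>
#include <time.h>
#include <errno.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll *status until it reports idle or tmo_ms elapses; tmo_ms == 0
 * means no deadline, matching the tmo handling in ioat2_quiesce(). */
static int quiesce(volatile int *status, long tmo_ms)
{
	long end = now_ms() + tmo_ms;

	while (*status != 0) {
		if (tmo_ms && now_ms() > end)
			return -ETIMEDOUT;
		(*status)--;		/* stand-in for hardware making progress */
	}
	return 0;
}

int main(void)
{
	int busy = 5;

	printf("quiesce -> %d (status %d)\n", quiesce(&busy, 100), busy);
	busy = 1 << 30;			/* "stuck" channel: deadline should trip */
	printf("quiesce -> %d\n", quiesce(&busy, 1));
	return 0;
}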
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef74d5f..3afad8da43cc 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
185void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); 185void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
186void ioat2_cleanup_tasklet(unsigned long data); 186void ioat2_cleanup_tasklet(unsigned long data);
187void ioat2_timer_event(unsigned long data); 187void ioat2_timer_event(unsigned long data);
188int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
189int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
188extern struct kobj_type ioat2_ktype; 190extern struct kobj_type ioat2_ktype;
189extern struct kmem_cache *ioat2_cache; 191extern struct kmem_cache *ioat2_cache;
190#endif /* IOATDMA_V2_H */ 192#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 42f6f10fb0cc..9908c9e94b2d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
650 650
651 num_descs = ioat2_xferlen_to_descs(ioat, len); 651 num_descs = ioat2_xferlen_to_descs(ioat, len);
652 /* we need 2x the number of descriptors to cover greater than 3 652 /* we need 2x the number of descriptors to cover greater than 3
653 * sources 653 * sources (we need 1 extra source in the q-only continuation
 654 * case and 3 extra sources in the p+q continuation case).
654 */ 655 */
655 if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) { 656 if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
657 (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
656 with_ext = 1; 658 with_ext = 1;
657 num_descs *= 2; 659 num_descs *= 2;
658 } else 660 } else
@@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
1128 return 0; 1130 return 0;
1129} 1131}
1130 1132
1133static int ioat3_reset_hw(struct ioat_chan_common *chan)
1134{
1135 /* throw away whatever the channel was doing and get it
1136 * initialized, with ioat3 specific workarounds
1137 */
1138 struct ioatdma_device *device = chan->device;
1139 struct pci_dev *pdev = device->pdev;
1140 u32 chanerr;
1141 u16 dev_id;
1142 int err;
1143
1144 ioat2_quiesce(chan, msecs_to_jiffies(100));
1145
1146 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1147 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1148
1149 /* -= IOAT ver.3 workarounds =- */
1150 /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1151 * that can cause stability issues for IOAT ver.3, and clear any
1152 * pending errors
1153 */
1154 pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1155 err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1156 if (err) {
1157 dev_err(&pdev->dev, "channel error register unreachable\n");
1158 return err;
1159 }
1160 pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1161
1162 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1163 * (workaround for spurious config parity error after restart)
1164 */
1165 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1166 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1167 pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1168
1169 return ioat2_reset_sync(chan, msecs_to_jiffies(200));
1170}
1171
1131int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) 1172int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1132{ 1173{
1133 struct pci_dev *pdev = device->pdev; 1174 struct pci_dev *pdev = device->pdev;
@@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1137 struct ioat_chan_common *chan; 1178 struct ioat_chan_common *chan;
1138 bool is_raid_device = false; 1179 bool is_raid_device = false;
1139 int err; 1180 int err;
1140 u16 dev_id;
1141 u32 cap; 1181 u32 cap;
1142 1182
1143 device->enumerate_channels = ioat2_enumerate_channels; 1183 device->enumerate_channels = ioat2_enumerate_channels;
1184 device->reset_hw = ioat3_reset_hw;
1144 device->self_test = ioat3_dma_self_test; 1185 device->self_test = ioat3_dma_self_test;
1145 dma = &device->common; 1186 dma = &device->common;
1146 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; 1187 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
@@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1216 dma->device_prep_dma_xor_val = NULL; 1257 dma->device_prep_dma_xor_val = NULL;
1217 #endif 1258 #endif
1218 1259
1219 /* -= IOAT ver.3 workarounds =- */
1220 /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1221 * that can cause stability issues for IOAT ver.3
1222 */
1223 pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1224
1225 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1226 * (workaround for spurious config parity error after restart)
1227 */
1228 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1229 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1230 pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1231
1232 err = ioat_probe(device); 1260 err = ioat_probe(device);
1233 if (err) 1261 if (err)
1234 return err; 1262 return err;
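A minimal userspace model of the callback wiring this file's changes introduce: reset_hw is selected per hardware version at probe time and the IOAT v3 workarounds now live inside that per-channel callback. The printf bodies merely stand in for the register and PCI config accesses shown in the hunks above:

#include <stdio.h>

struct chan { int id; };

struct ioatdma_device {
	int version;
	int (*reset_hw)(struct chan *chan);
};

static int ioat2_reset_hw(struct chan *c)
{
	printf("chan %d: generic v2 reset (quiesce + synchronous reset)\n", c->id);
	return 0;
}

static int ioat3_reset_hw(struct chan *c)
{
	printf("chan %d: v3 reset: quiesce, clear CHANERR,\n", c->id);
	printf("         mask PCI CHANERRMASK_INT with 0x3e07,\n");
	printf("         clear pending PCI channel errors, then reset_sync\n");
	return 0;
}

int main(void)
{
	struct ioatdma_device dev = { .version = 3 };
	struct chan ch = { .id = 0 };

	/* probe-time wiring, as in the hunks above */
	dev.reset_hw = dev.version >= 3 ? ioat3_reset_hw : ioat2_reset_hw;

	/* the same callback now runs at both init and resume */
	dev.reset_hw(&ch);
	return 0;
}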
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index f015ec196700..e8ae63baf588 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -27,6 +27,7 @@
27 27
28#define IOAT_PCI_DEVICE_ID_OFFSET 0x02 28#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
29#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 29#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
30#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
30#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 31#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
31 32
32/* MMIO Device Registers */ 33/* MMIO Device Registers */
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389e..e80bae1673fa 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
761 * @buffer_n: buffer number to update. 761 * @buffer_n: buffer number to update.
762 * 0 or 1 are the only valid values. 762 * 0 or 1 are the only valid values.
763 * @phyaddr: buffer physical address. 763 * @phyaddr: buffer physical address.
764 * @return: Returns 0 on success or negative error code on failure. This
765 * function will fail if the buffer is set to ready.
766 */ 764 */
767/* Called under spin_lock(_irqsave)(&ichan->lock) */ 765/* Called under spin_lock(_irqsave)(&ichan->lock) */
768static int ipu_update_channel_buffer(struct idmac_channel *ichan, 766static void ipu_update_channel_buffer(struct idmac_channel *ichan,
769 int buffer_n, dma_addr_t phyaddr) 767 int buffer_n, dma_addr_t phyaddr)
770{ 768{
771 enum ipu_channel channel = ichan->dma_chan.chan_id; 769 enum ipu_channel channel = ichan->dma_chan.chan_id;
772 uint32_t reg; 770 uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
806 } 804 }
807 805
808 spin_unlock_irqrestore(&ipu_data.lock, flags); 806 spin_unlock_irqrestore(&ipu_data.lock, flags);
809
810 return 0;
811} 807}
812 808
813/* Called under spin_lock_irqsave(&ichan->lock) */ 809/* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
816{ 812{
817 unsigned int chan_id = ichan->dma_chan.chan_id; 813 unsigned int chan_id = ichan->dma_chan.chan_id;
818 struct device *dev = &ichan->dma_chan.dev->device; 814 struct device *dev = &ichan->dma_chan.dev->device;
819 int ret;
820 815
821 if (async_tx_test_ack(&desc->txd)) 816 if (async_tx_test_ack(&desc->txd))
822 return -EINTR; 817 return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
827 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but 822 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
828 * doing it again shouldn't hurt either. 823 * doing it again shouldn't hurt either.
829 */ 824 */
830 ret = ipu_update_channel_buffer(ichan, buf_idx, 825 ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
831 sg_dma_address(sg));
832
833 if (ret < 0) {
834 dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
835 sg, chan_id, buf_idx);
836 return ret;
837 }
838 826
839 ipu_select_buffer(chan_id, buf_idx); 827 ipu_select_buffer(chan_id, buf_idx);
840 dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", 828 dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1379 1367
1380 if (likely(sgnew) && 1368 if (likely(sgnew) &&
1381 ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { 1369 ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
1382 callback = desc->txd.callback; 1370 callback = descnew->txd.callback;
1383 callback_param = desc->txd.callback_param; 1371 callback_param = descnew->txd.callback_param;
1384 spin_unlock(&ichan->lock); 1372 spin_unlock(&ichan->lock);
1385 callback(callback_param); 1373 if (callback)
1374 callback(callback_param);
1386 spin_lock(&ichan->lock); 1375 spin_lock(&ichan->lock);
1387 } 1376 }
1388 1377
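A standalone sketch of the corrected error path in idmac_interrupt(): the callback belongs to descnew (the descriptor whose re-submission failed), may legitimately be NULL, and must run with the channel lock dropped. A pthread mutex stands in for the kernel spinlock here:

#include <stdio.h>
#include <pthread.h>

typedef void (*dma_callback)(void *param);

struct descriptor {
	dma_callback callback;
	void *callback_param;
};

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

static void report_error(void *param)
{
	printf("resubmission failed for %s\n", (const char *)param);
}

/* Error path of the interrupt handler: entered with chan_lock held. */
static void notify_failed_submit(struct descriptor *descnew)
{
	dma_callback callback = descnew->callback;	/* descnew, not desc */
	void *callback_param = descnew->callback_param;

	pthread_mutex_unlock(&chan_lock);	/* never call user code under the lock */
	if (callback)				/* the callback may be unset */
		callback(callback_param);
	pthread_mutex_lock(&chan_lock);		/* caller continues under the lock */
}

int main(void)
{
	struct descriptor with_cb = { report_error, "descnew #1" };
	struct descriptor without_cb = { NULL, NULL };

	pthread_mutex_lock(&chan_lock);
	notify_failed_submit(&with_cb);
	notify_failed_submit(&without_cb);	/* previously a NULL call -> crash */
	pthread_mutex_unlock(&chan_lock);
	return 0;
}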
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2e4a54c8afeb..d10cc899c460 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,16 +23,19 @@
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/dmapool.h>
27#include <linux/platform_device.h> 26#include <linux/platform_device.h>
28#include <cpu/dma.h> 27#include <cpu/dma.h>
29#include <asm/dma-sh.h> 28#include <asm/dma-sh.h>
30#include "shdma.h" 29#include "shdma.h"
31 30
32/* DMA descriptor control */ 31/* DMA descriptor control */
33#define DESC_LAST (-1) 32enum sh_dmae_desc_status {
34#define DESC_COMP (1) 33 DESC_IDLE,
35#define DESC_NCOMP (0) 34 DESC_PREPARED,
35 DESC_SUBMITTED,
36 DESC_COMPLETED, /* completed, have to call callback */
37 DESC_WAITING, /* callback called, waiting for ack / re-submit */
38};
36 39
37#define NR_DESCS_PER_CHANNEL 32 40#define NR_DESCS_PER_CHANNEL 32
38/* 41/*
@@ -45,6 +48,8 @@
45 */ 48 */
46#define RS_DEFAULT (RS_DUAL) 49#define RS_DEFAULT (RS_DUAL)
47 50
51static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
52
48#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) 53#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
49static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 54static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
50{ 55{
@@ -106,11 +111,11 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
106 return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; 111 return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
107} 112}
108 113
109static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw) 114static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
110{ 115{
111 sh_dmae_writel(sh_chan, hw.sar, SAR); 116 sh_dmae_writel(sh_chan, hw->sar, SAR);
112 sh_dmae_writel(sh_chan, hw.dar, DAR); 117 sh_dmae_writel(sh_chan, hw->dar, DAR);
113 sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR); 118 sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
114} 119}
115 120
116static void dmae_start(struct sh_dmae_chan *sh_chan) 121static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -184,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
184 189
185static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) 190static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
186{ 191{
187 struct sh_desc *desc = tx_to_sh_desc(tx); 192 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
188 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); 193 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
194 dma_async_tx_callback callback = tx->callback;
189 dma_cookie_t cookie; 195 dma_cookie_t cookie;
190 196
191 spin_lock_bh(&sh_chan->desc_lock); 197 spin_lock_bh(&sh_chan->desc_lock);
@@ -195,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
195 if (cookie < 0) 201 if (cookie < 0)
196 cookie = 1; 202 cookie = 1;
197 203
198 /* If desc only in the case of 1 */ 204 sh_chan->common.cookie = cookie;
199 if (desc->async_tx.cookie != -EBUSY) 205 tx->cookie = cookie;
200 desc->async_tx.cookie = cookie; 206
201 sh_chan->common.cookie = desc->async_tx.cookie; 207 /* Mark all chunks of this descriptor as submitted, move to the queue */
208 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
209 /*
210 * All chunks are on the global ld_free, so, we have to find
211 * the end of the chain ourselves
212 */
213 if (chunk != desc && (chunk->mark == DESC_IDLE ||
214 chunk->async_tx.cookie > 0 ||
215 chunk->async_tx.cookie == -EBUSY ||
216 &chunk->node == &sh_chan->ld_free))
217 break;
218 chunk->mark = DESC_SUBMITTED;
219 /* Callback goes to the last chunk */
220 chunk->async_tx.callback = NULL;
221 chunk->cookie = cookie;
222 list_move_tail(&chunk->node, &sh_chan->ld_queue);
223 last = chunk;
224 }
225
226 last->async_tx.callback = callback;
227 last->async_tx.callback_param = tx->callback_param;
202 228
203 list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev); 229 dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
230 tx->cookie, &last->async_tx, sh_chan->id,
231 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
204 232
205 spin_unlock_bh(&sh_chan->desc_lock); 233 spin_unlock_bh(&sh_chan->desc_lock);
206 234
207 return cookie; 235 return cookie;
208} 236}
209 237
238/* Called with desc_lock held */
210static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) 239static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
211{ 240{
212 struct sh_desc *desc, *_desc, *ret = NULL; 241 struct sh_desc *desc;
213 242
214 spin_lock_bh(&sh_chan->desc_lock); 243 list_for_each_entry(desc, &sh_chan->ld_free, node)
215 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) { 244 if (desc->mark != DESC_PREPARED) {
216 if (async_tx_test_ack(&desc->async_tx)) { 245 BUG_ON(desc->mark != DESC_IDLE);
217 list_del(&desc->node); 246 list_del(&desc->node);
218 ret = desc; 247 return desc;
219 break;
220 } 248 }
221 }
222 spin_unlock_bh(&sh_chan->desc_lock);
223
224 return ret;
225}
226
227static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
228{
229 if (desc) {
230 spin_lock_bh(&sh_chan->desc_lock);
231
232 list_splice_init(&desc->tx_list, &sh_chan->ld_free);
233 list_add(&desc->node, &sh_chan->ld_free);
234 249
235 spin_unlock_bh(&sh_chan->desc_lock); 250 return NULL;
236 }
237} 251}
238 252
239static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) 253static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
@@ -252,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
252 dma_async_tx_descriptor_init(&desc->async_tx, 266 dma_async_tx_descriptor_init(&desc->async_tx,
253 &sh_chan->common); 267 &sh_chan->common);
254 desc->async_tx.tx_submit = sh_dmae_tx_submit; 268 desc->async_tx.tx_submit = sh_dmae_tx_submit;
255 desc->async_tx.flags = DMA_CTRL_ACK; 269 desc->mark = DESC_IDLE;
256 INIT_LIST_HEAD(&desc->tx_list);
257 sh_dmae_put_desc(sh_chan, desc);
258 270
259 spin_lock_bh(&sh_chan->desc_lock); 271 spin_lock_bh(&sh_chan->desc_lock);
272 list_add(&desc->node, &sh_chan->ld_free);
260 sh_chan->descs_allocated++; 273 sh_chan->descs_allocated++;
261 } 274 }
262 spin_unlock_bh(&sh_chan->desc_lock); 275 spin_unlock_bh(&sh_chan->desc_lock);
@@ -273,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
273 struct sh_desc *desc, *_desc; 286 struct sh_desc *desc, *_desc;
274 LIST_HEAD(list); 287 LIST_HEAD(list);
275 288
276 BUG_ON(!list_empty(&sh_chan->ld_queue)); 289 /* Prepared and not submitted descriptors can still be on the queue */
290 if (!list_empty(&sh_chan->ld_queue))
291 sh_dmae_chan_ld_cleanup(sh_chan, true);
292
277 spin_lock_bh(&sh_chan->desc_lock); 293 spin_lock_bh(&sh_chan->desc_lock);
278 294
279 list_splice_init(&sh_chan->ld_free, &list); 295 list_splice_init(&sh_chan->ld_free, &list);
@@ -292,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
292 struct sh_dmae_chan *sh_chan; 308 struct sh_dmae_chan *sh_chan;
293 struct sh_desc *first = NULL, *prev = NULL, *new; 309 struct sh_desc *first = NULL, *prev = NULL, *new;
294 size_t copy_size; 310 size_t copy_size;
311 LIST_HEAD(tx_list);
312 int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
295 313
296 if (!chan) 314 if (!chan)
297 return NULL; 315 return NULL;
@@ -301,108 +319,189 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
301 319
302 sh_chan = to_sh_chan(chan); 320 sh_chan = to_sh_chan(chan);
303 321
322 /* Have to lock the whole loop to protect against concurrent release */
323 spin_lock_bh(&sh_chan->desc_lock);
324
325 /*
326 * Chaining:
327 * first descriptor is what user is dealing with in all API calls, its
328 * cookie is at first set to -EBUSY, at tx-submit to a positive
329 * number
330 * if more than one chunk is needed further chunks have cookie = -EINVAL
331 * the last chunk, if not equal to the first, has cookie = -ENOSPC
332 * all chunks are linked onto the tx_list head with their .node heads
333 * only during this function, then they are immediately spliced
334 * back onto the free list in form of a chain
335 */
304 do { 336 do {
305 /* Allocate the link descriptor from DMA pool */ 337 /* Allocate the link descriptor from the free list */
306 new = sh_dmae_get_desc(sh_chan); 338 new = sh_dmae_get_desc(sh_chan);
307 if (!new) { 339 if (!new) {
308 dev_err(sh_chan->dev, 340 dev_err(sh_chan->dev,
309 "No free memory for link descriptor\n"); 341 "No free memory for link descriptor\n");
310 goto err_get_desc; 342 list_for_each_entry(new, &tx_list, node)
343 new->mark = DESC_IDLE;
344 list_splice(&tx_list, &sh_chan->ld_free);
345 spin_unlock_bh(&sh_chan->desc_lock);
346 return NULL;
311 } 347 }
312 348
313 copy_size = min(len, (size_t)SH_DMA_TCR_MAX); 349 copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
314 350
315 new->hw.sar = dma_src; 351 new->hw.sar = dma_src;
316 new->hw.dar = dma_dest; 352 new->hw.dar = dma_dest;
317 new->hw.tcr = copy_size; 353 new->hw.tcr = copy_size;
318 if (!first) 354 if (!first) {
355 /* First desc */
356 new->async_tx.cookie = -EBUSY;
319 first = new; 357 first = new;
358 } else {
359 /* Other desc - invisible to the user */
360 new->async_tx.cookie = -EINVAL;
361 }
320 362
321 new->mark = DESC_NCOMP; 363 dev_dbg(sh_chan->dev,
322 async_tx_ack(&new->async_tx); 364 "chaining %u of %u with %p, dst %x, cookie %d\n",
365 copy_size, len, &new->async_tx, dma_dest,
366 new->async_tx.cookie);
367
368 new->mark = DESC_PREPARED;
369 new->async_tx.flags = flags;
370 new->chunks = chunks--;
323 371
324 prev = new; 372 prev = new;
325 len -= copy_size; 373 len -= copy_size;
326 dma_src += copy_size; 374 dma_src += copy_size;
327 dma_dest += copy_size; 375 dma_dest += copy_size;
328 /* Insert the link descriptor to the LD ring */ 376 /* Insert the link descriptor to the LD ring */
329 list_add_tail(&new->node, &first->tx_list); 377 list_add_tail(&new->node, &tx_list);
330 } while (len); 378 } while (len);
331 379
332 new->async_tx.flags = flags; /* client is in control of this ack */ 380 if (new != first)
333 new->async_tx.cookie = -EBUSY; /* Last desc */ 381 new->async_tx.cookie = -ENOSPC;
334 382
335 return &first->async_tx; 383 /* Put them back on the free list, so, they don't get lost */
384 list_splice_tail(&tx_list, &sh_chan->ld_free);
336 385
337err_get_desc: 386 spin_unlock_bh(&sh_chan->desc_lock);
338 sh_dmae_put_desc(sh_chan, first);
339 return NULL;
340 387
388 return &first->async_tx;
341} 389}
342 390
343/* 391static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
344 * sh_chan_ld_cleanup - Clean up link descriptors
345 *
346 * This function clean up the ld_queue of DMA channel.
347 */
348static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
349{ 392{
350 struct sh_desc *desc, *_desc; 393 struct sh_desc *desc, *_desc;
394 /* Is the "exposed" head of a chain acked? */
395 bool head_acked = false;
396 dma_cookie_t cookie = 0;
397 dma_async_tx_callback callback = NULL;
398 void *param = NULL;
351 399
352 spin_lock_bh(&sh_chan->desc_lock); 400 spin_lock_bh(&sh_chan->desc_lock);
353 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { 401 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
354 dma_async_tx_callback callback; 402 struct dma_async_tx_descriptor *tx = &desc->async_tx;
355 void *callback_param; 403
356 404 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
357 /* non send data */ 405 BUG_ON(desc->mark != DESC_SUBMITTED &&
358 if (desc->mark == DESC_NCOMP) 406 desc->mark != DESC_COMPLETED &&
407 desc->mark != DESC_WAITING);
408
409 /*
410 * queue is ordered, and we use this loop to (1) clean up all
411 * completed descriptors, and to (2) update descriptor flags of
412 * any chunks in a (partially) completed chain
413 */
414 if (!all && desc->mark == DESC_SUBMITTED &&
415 desc->cookie != cookie)
359 break; 416 break;
360 417
361 /* send data sesc */ 418 if (tx->cookie > 0)
362 callback = desc->async_tx.callback; 419 cookie = tx->cookie;
363 callback_param = desc->async_tx.callback_param;
364 420
365 /* Remove from ld_queue list */ 421 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
366 list_splice_init(&desc->tx_list, &sh_chan->ld_free); 422 BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
423 sh_chan->completed_cookie = desc->cookie;
424 }
367 425
368 dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n", 426 /* Call callback on the last chunk */
369 desc); 427 if (desc->mark == DESC_COMPLETED && tx->callback) {
428 desc->mark = DESC_WAITING;
429 callback = tx->callback;
430 param = tx->callback_param;
431 dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
432 tx->cookie, tx, sh_chan->id);
433 BUG_ON(desc->chunks != 1);
434 break;
435 }
370 436
371 list_move(&desc->node, &sh_chan->ld_free); 437 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
372 /* Run the link descriptor callback function */ 438 if (desc->mark == DESC_COMPLETED) {
373 if (callback) { 439 BUG_ON(tx->cookie < 0);
374 spin_unlock_bh(&sh_chan->desc_lock); 440 desc->mark = DESC_WAITING;
375 dev_dbg(sh_chan->dev, "link descriptor %p callback\n", 441 }
376 desc); 442 head_acked = async_tx_test_ack(tx);
377 callback(callback_param); 443 } else {
378 spin_lock_bh(&sh_chan->desc_lock); 444 switch (desc->mark) {
445 case DESC_COMPLETED:
446 desc->mark = DESC_WAITING;
447 /* Fall through */
448 case DESC_WAITING:
449 if (head_acked)
450 async_tx_ack(&desc->async_tx);
451 }
452 }
453
454 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
455 tx, tx->cookie);
456
457 if (((desc->mark == DESC_COMPLETED ||
458 desc->mark == DESC_WAITING) &&
459 async_tx_test_ack(&desc->async_tx)) || all) {
460 /* Remove from ld_queue list */
461 desc->mark = DESC_IDLE;
462 list_move(&desc->node, &sh_chan->ld_free);
379 } 463 }
380 } 464 }
381 spin_unlock_bh(&sh_chan->desc_lock); 465 spin_unlock_bh(&sh_chan->desc_lock);
466
467 if (callback)
468 callback(param);
469
470 return callback;
471}
472
473/*
474 * sh_chan_ld_cleanup - Clean up link descriptors
475 *
476 * This function cleans up the ld_queue of DMA channel.
477 */
478static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
479{
480 while (__ld_cleanup(sh_chan, all))
481 ;
382} 482}
383 483
384static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 484static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
385{ 485{
386 struct list_head *ld_node; 486 struct sh_desc *sd;
387 struct sh_dmae_regs hw;
388 487
488 spin_lock_bh(&sh_chan->desc_lock);
389 /* DMA work check */ 489 /* DMA work check */
390 if (dmae_is_busy(sh_chan)) 490 if (dmae_is_busy(sh_chan)) {
491 spin_unlock_bh(&sh_chan->desc_lock);
391 return; 492 return;
493 }
392 494
393 /* Find the first un-transfer desciptor */ 495 /* Find the first un-transfer desciptor */
394 for (ld_node = sh_chan->ld_queue.next; 496 list_for_each_entry(sd, &sh_chan->ld_queue, node)
395 (ld_node != &sh_chan->ld_queue) 497 if (sd->mark == DESC_SUBMITTED) {
396 && (to_sh_desc(ld_node)->mark == DESC_COMP); 498 /* Get the ld start address from ld_queue */
397 ld_node = ld_node->next) 499 dmae_set_reg(sh_chan, &sd->hw);
398 cpu_relax(); 500 dmae_start(sh_chan);
399 501 break;
400 if (ld_node != &sh_chan->ld_queue) { 502 }
401 /* Get the ld start address from ld_queue */ 503
402 hw = to_sh_desc(ld_node)->hw; 504 spin_unlock_bh(&sh_chan->desc_lock);
403 dmae_set_reg(sh_chan, hw);
404 dmae_start(sh_chan);
405 }
406} 505}
407 506
408static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) 507static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -420,12 +519,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
420 dma_cookie_t last_used; 519 dma_cookie_t last_used;
421 dma_cookie_t last_complete; 520 dma_cookie_t last_complete;
422 521
423 sh_dmae_chan_ld_cleanup(sh_chan); 522 sh_dmae_chan_ld_cleanup(sh_chan, false);
424 523
425 last_used = chan->cookie; 524 last_used = chan->cookie;
426 last_complete = sh_chan->completed_cookie; 525 last_complete = sh_chan->completed_cookie;
427 if (last_complete == -EBUSY) 526 BUG_ON(last_complete < 0);
428 last_complete = last_used;
429 527
430 if (done) 528 if (done)
431 *done = last_complete; 529 *done = last_complete;
@@ -480,11 +578,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
480 err = sh_dmae_rst(0); 578 err = sh_dmae_rst(0);
481 if (err) 579 if (err)
482 return err; 580 return err;
581#ifdef SH_DMAC_BASE1
483 if (shdev->pdata.mode & SHDMA_DMAOR1) { 582 if (shdev->pdata.mode & SHDMA_DMAOR1) {
484 err = sh_dmae_rst(1); 583 err = sh_dmae_rst(1);
485 if (err) 584 if (err)
486 return err; 585 return err;
487 } 586 }
587#endif
488 disable_irq(irq); 588 disable_irq(irq);
489 return IRQ_HANDLED; 589 return IRQ_HANDLED;
490 } 590 }
@@ -494,35 +594,25 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
494static void dmae_do_tasklet(unsigned long data) 594static void dmae_do_tasklet(unsigned long data)
495{ 595{
496 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; 596 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
497 struct sh_desc *desc, *_desc, *cur_desc = NULL; 597 struct sh_desc *desc;
498 u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 598 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
499 599
500 list_for_each_entry_safe(desc, _desc, 600 spin_lock(&sh_chan->desc_lock);
501 &sh_chan->ld_queue, node) { 601 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
502 if ((desc->hw.sar + desc->hw.tcr) == sar_buf) { 602 if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
503 cur_desc = desc; 603 desc->mark == DESC_SUBMITTED) {
604 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
605 desc->async_tx.cookie, &desc->async_tx,
606 desc->hw.dar);
607 desc->mark = DESC_COMPLETED;
504 break; 608 break;
505 } 609 }
506 } 610 }
611 spin_unlock(&sh_chan->desc_lock);
507 612
508 if (cur_desc) {
509 switch (cur_desc->async_tx.cookie) {
510 case 0: /* other desc data */
511 break;
512 case -EBUSY: /* last desc */
513 sh_chan->completed_cookie =
514 cur_desc->async_tx.cookie;
515 break;
516 default: /* first desc ( 0 < )*/
517 sh_chan->completed_cookie =
518 cur_desc->async_tx.cookie - 1;
519 break;
520 }
521 cur_desc->mark = DESC_COMP;
522 }
523 /* Next desc */ 613 /* Next desc */
524 sh_chan_xfer_ld_queue(sh_chan); 614 sh_chan_xfer_ld_queue(sh_chan);
525 sh_dmae_chan_ld_cleanup(sh_chan); 615 sh_dmae_chan_ld_cleanup(sh_chan, false);
526} 616}
527 617
528static unsigned int get_dmae_irq(unsigned int id) 618static unsigned int get_dmae_irq(unsigned int id)
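A standalone model of how sh_dmae_prep_memcpy() now splits a request into chunks and which cookie each chunk starts with: -EBUSY for the chunk exposed to the user, -EINVAL for invisible middle chunks, -ENOSPC for the last of a multi-chunk chain. Only the arithmetic is shown; errno values are the usual Linux ones:

#include <errno.h>
#include <stdio.h>
#include <stddef.h>

#define SH_DMA_TCR_MAX 0x00FFFFFF	/* as in shdma.h: 16MB - 1 */

int main(void)
{
	size_t len = 40 << 20;		/* a 40 MB memcpy request */
	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
	size_t remaining = len;
	int i = 0;

	printf("len=%zu bytes -> %d chunks\n", len, chunks);
	while (remaining) {
		size_t copy_size = remaining < (size_t)SH_DMA_TCR_MAX + 1 ?
				   remaining : (size_t)SH_DMA_TCR_MAX + 1;
		int cookie;

		if (i == 0)
			cookie = -EBUSY;	/* the chunk handed back to the user */
		else if (remaining == copy_size)
			cookie = -ENOSPC;	/* last chunk of a multi-chunk chain */
		else
			cookie = -EINVAL;	/* middle chunk, invisible to the user */

		printf("chunk %d: %zu bytes, initial cookie %d\n", i, copy_size, cookie);
		remaining -= copy_size;
		i++;
	}
	return 0;
}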
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 60b81e529b42..108f1cffb6f5 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -13,9 +13,9 @@
13#ifndef __DMA_SHDMA_H 13#ifndef __DMA_SHDMA_H
14#define __DMA_SHDMA_H 14#define __DMA_SHDMA_H
15 15
16#include <linux/device.h>
17#include <linux/dmapool.h>
18#include <linux/dmaengine.h> 16#include <linux/dmaengine.h>
17#include <linux/interrupt.h>
18#include <linux/list.h>
19 19
20#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 20#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
21 21
@@ -26,13 +26,16 @@ struct sh_dmae_regs {
26}; 26};
27 27
28struct sh_desc { 28struct sh_desc {
29 struct list_head tx_list;
30 struct sh_dmae_regs hw; 29 struct sh_dmae_regs hw;
31 struct list_head node; 30 struct list_head node;
32 struct dma_async_tx_descriptor async_tx; 31 struct dma_async_tx_descriptor async_tx;
32 dma_cookie_t cookie;
33 int chunks;
33 int mark; 34 int mark;
34}; 35};
35 36
37struct device;
38
36struct sh_dmae_chan { 39struct sh_dmae_chan {
37 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 40 dma_cookie_t completed_cookie; /* The maximum cookie completed */
38 spinlock_t desc_lock; /* Descriptor operation lock */ 41 spinlock_t desc_lock; /* Descriptor operation lock */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c5facd951dda..3391e6739d06 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -197,7 +197,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
197 edac_printk(KERN_DEBUG, EDAC_MC, 197 edac_printk(KERN_DEBUG, EDAC_MC,
198 "pci-read, sdram scrub control value: %d \n", scrubval); 198 "pci-read, sdram scrub control value: %d \n", scrubval);
199 199
200 for (i = 0; ARRAY_SIZE(scrubrates); i++) { 200 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
201 if (scrubrates[i].scrubval == scrubval) { 201 if (scrubrates[i].scrubval == scrubval) {
202 *bw = scrubrates[i].bandwidth; 202 *bw = scrubrates[i].bandwidth;
203 status = 0; 203 status = 0;
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2658 * the memory system completely. A command line option allows to force-enable 2658 * the memory system completely. A command line option allows to force-enable
2659 * hardware ECC later in amd64_enable_ecc_error_reporting(). 2659 * hardware ECC later in amd64_enable_ecc_error_reporting().
2660 */ 2660 */
2661static const char *ecc_warning = 2661static const char *ecc_msg =
2662 "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n" 2662 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2663 " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n" 2663 " Either enable ECC checking or force module loading by setting "
2664 " Also, use of the override can cause unknown side effects.\n"; 2664 "'ecc_enable_override'.\n"
2665 " (Note that use of the override may cause unknown side effects.)\n";
2665 2666
2666static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) 2667static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2667{ 2668{
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2673 2674
2674 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); 2675 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
2675 if (!ecc_enabled) 2676 if (!ecc_enabled)
2676 amd64_printk(KERN_WARNING, "This node reports that Memory ECC " 2677 amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
2677 "is currently disabled, set F3x%x[22] (%s).\n", 2678 "is currently disabled, set F3x%x[22] (%s).\n",
2678 K8_NBCFG, pci_name(pvt->misc_f3_ctl)); 2679 K8_NBCFG, pci_name(pvt->misc_f3_ctl));
2679 else 2680 else
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2681 2682
2682 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); 2683 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
2683 if (!nb_mce_en) 2684 if (!nb_mce_en)
2684 amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR " 2685 amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
2685 "0x%08x[4] on node %d to enable.\n", 2686 "0x%08x[4] on node %d to enable.\n",
2686 MSR_IA32_MCG_CTL, pvt->mc_node_id); 2687 MSR_IA32_MCG_CTL, pvt->mc_node_id);
2687 2688
2688 if (!ecc_enabled || !nb_mce_en) { 2689 if (!ecc_enabled || !nb_mce_en) {
2689 if (!ecc_enable_override) { 2690 if (!ecc_enable_override) {
2690 amd64_printk(KERN_WARNING, "%s", ecc_warning); 2691 amd64_printk(KERN_NOTICE, "%s", ecc_msg);
2691 return -ENODEV; 2692 return -ENODEV;
2692 } 2693 }
2693 ecc_enable_override = 0; 2694 ecc_enable_override = 0;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 77a9579d7167..adc10a2ac5f6 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -577,7 +577,13 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
577 debugf0("\tUncorrected bits= 0x%x\n", ue_errors); 577 debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
578 578
579 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); 579 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
580 channel = branch; 580
581 /*
 582 * According to the i5000 datasheet, bit 28 has no significance
 583 * for errors M4Err-M12Err and M17Err-M21Err on FERR_NF_FBD
584 */
585 channel = branch & 2;
586
581 bank = NREC_BANK(info->nrecmema); 587 bank = NREC_BANK(info->nrecmema);
582 rank = NREC_RANK(info->nrecmema); 588 rank = NREC_RANK(info->nrecmema);
583 rdwr = NREC_RDWR(info->nrecmema); 589 rdwr = NREC_RDWR(info->nrecmema);
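A small illustration of the channel fix above: the extracted branch/channel index is a two-bit value whose low bit (bit 28 of FERR_NF_FBD) carries no information for these error types, so only the upper bit is kept. The field extraction is simplified here, standing in for EXTRACT_FBDCHAN_INDX():

#include <stdio.h>

int main(void)
{
	for (unsigned branch = 0; branch < 4; branch++) {
		/* branch models bits 29:28 of FERR_NF_FBD */
		unsigned channel = branch & 2;	/* ignore the meaningless bit 28 */
		printf("branch index %u -> channel %u\n", branch, channel);
	}
	return 0;
}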
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402af97b..ecd5928d7110 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
804 end <<= (24 - PAGE_SHIFT); 804 end <<= (24 - PAGE_SHIFT);
805 end |= (1 << (24 - PAGE_SHIFT)) - 1; 805 end |= (1 << (24 - PAGE_SHIFT)) - 1;
806 806
807 csrow->first_page = start >> PAGE_SHIFT; 807 csrow->first_page = start;
808 csrow->last_page = end >> PAGE_SHIFT; 808 csrow->last_page = end;
809 csrow->nr_pages = end + 1 - start; 809 csrow->nr_pages = end + 1 - start;
810 csrow->grain = 8; 810 csrow->grain = 8;
811 csrow->mtype = mtype; 811 csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
892 892
893 mpc85xx_init_csrows(mci); 893 mpc85xx_init_csrows(mci);
894 894
895#ifdef CONFIG_EDAC_DEBUG
896 edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
897#endif
898
899 /* store the original error disable bits */ 895 /* store the original error disable bits */
900 orig_ddr_err_disable = 896 orig_ddr_err_disable =
901 in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE); 897 in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
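A standalone check of the csrow arithmetic above: start and end are already page numbers once shifted by (24 - PAGE_SHIFT), so the old extra >> PAGE_SHIFT corrupted them. A PAGE_SHIFT of 12 and small sample boundary values are assumed for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int bnds_start = 1, bnds_end = 2;	/* sample 16MB-granular bounds */
	unsigned long start = bnds_start << (24 - PAGE_SHIFT);		/* first page */
	unsigned long end = ((unsigned long)bnds_end << (24 - PAGE_SHIFT)) |
			    ((1 << (24 - PAGE_SHIFT)) - 1);		/* last page */

	printf("first_page=%lu last_page=%lu nr_pages=%lu\n",
	       start, end, end + 1 - start);
	printf("old (wrong) first_page=%lu last_page=%lu\n",
	       start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	return 0;
}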
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 13efcd362072..a9371b36a9b9 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,5 +1,10 @@
1menu "IEEE 1394 (FireWire) support"
2 depends on PCI || BROKEN
3 # firewire-core does not depend on PCI but is
4 # not useful without PCI controller driver
5
1comment "You can enable one or both FireWire driver stacks." 6comment "You can enable one or both FireWire driver stacks."
2comment "See the help texts for more information." 7comment "The newer stack is recommended."
3 8
4config FIREWIRE 9config FIREWIRE
5 tristate "FireWire driver stack" 10 tristate "FireWire driver stack"
@@ -15,16 +20,6 @@ config FIREWIRE
15 To compile this driver as a module, say M here: the module will be 20 To compile this driver as a module, say M here: the module will be
16 called firewire-core. 21 called firewire-core.
17 22
18 This module functionally replaces ieee1394, raw1394, and video1394.
19 To access it from application programs, you generally need at least
20 libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
21 No libraries are required to access storage devices through the
22 firewire-sbp2 driver.
23
24 NOTE:
25 FireWire audio devices currently require the old drivers (ieee1394,
26 ohci1394, raw1394).
27
28config FIREWIRE_OHCI 23config FIREWIRE_OHCI
29 tristate "OHCI-1394 controllers" 24 tristate "OHCI-1394 controllers"
30 depends on PCI && FIREWIRE 25 depends on PCI && FIREWIRE
@@ -34,22 +29,7 @@ config FIREWIRE_OHCI
34 is the only chipset in use, so say Y here. 29 is the only chipset in use, so say Y here.
35 30
36 To compile this driver as a module, say M here: The module will be 31 To compile this driver as a module, say M here: The module will be
37 called firewire-ohci. It replaces ohci1394 of the classic IEEE 1394 32 called firewire-ohci.
38 stack.
39
40 NOTE:
41 If you want to install firewire-ohci and ohci1394 together, you
42 should configure them only as modules and blacklist the driver(s)
43 which you don't want to have auto-loaded. Add either
44
45 blacklist firewire-ohci
46 or
47 blacklist ohci1394
48 blacklist video1394
49 blacklist dv1394
50
51 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
52 depending on your distribution.
53 33
54config FIREWIRE_OHCI_DEBUG 34config FIREWIRE_OHCI_DEBUG
55 bool 35 bool
@@ -66,8 +46,7 @@ config FIREWIRE_SBP2
66 like scanners. 46 like scanners.
67 47
68 To compile this driver as a module, say M here: The module will be 48 To compile this driver as a module, say M here: The module will be
69 called firewire-sbp2. It replaces sbp2 of the classic IEEE 1394 49 called firewire-sbp2.
70 stack.
71 50
72 You should also enable support for disks, CD-ROMs, etc. in the SCSI 51 You should also enable support for disks, CD-ROMs, etc. in the SCSI
73 configuration section. 52 configuration section.
@@ -83,5 +62,8 @@ config FIREWIRE_NET
83 NOTE, this driver is not stable yet! 62 NOTE, this driver is not stable yet!
84 63
85 To compile this driver as a module, say M here: The module will be 64 To compile this driver as a module, say M here: The module will be
86 called firewire-net. It replaces eth1394 of the classic IEEE 1394 65 called firewire-net.
87 stack. 66
67source "drivers/ieee1394/Kconfig"
68
69endmenu
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 7083bcc1b9c7..5045156c5313 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -57,6 +57,8 @@ static LIST_HEAD(descriptor_list);
57static int descriptor_count; 57static int descriptor_count;
58 58
59static __be32 tmp_config_rom[256]; 59static __be32 tmp_config_rom[256];
60/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
61static size_t config_rom_length = 1 + 4 + 1 + 1;
60 62
61#define BIB_CRC(v) ((v) << 0) 63#define BIB_CRC(v) ((v) << 0)
62#define BIB_CRC_LENGTH(v) ((v) << 16) 64#define BIB_CRC_LENGTH(v) ((v) << 16)
@@ -73,7 +75,7 @@ static __be32 tmp_config_rom[256];
73#define BIB_CMC ((1) << 30) 75#define BIB_CMC ((1) << 30)
74#define BIB_IMC ((1) << 31) 76#define BIB_IMC ((1) << 31)
75 77
76static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom) 78static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
77{ 79{
78 struct fw_descriptor *desc; 80 struct fw_descriptor *desc;
79 int i, j, k, length; 81 int i, j, k, length;
@@ -130,23 +132,30 @@ static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom)
130 for (i = 0; i < j; i += length + 1) 132 for (i = 0; i < j; i += length + 1)
131 length = fw_compute_block_crc(config_rom + i); 133 length = fw_compute_block_crc(config_rom + i);
132 134
133 return j; 135 WARN_ON(j != config_rom_length);
134} 136}
135 137
136static void update_config_roms(void) 138static void update_config_roms(void)
137{ 139{
138 struct fw_card *card; 140 struct fw_card *card;
139 size_t length;
140 141
141 list_for_each_entry (card, &card_list, link) { 142 list_for_each_entry (card, &card_list, link) {
142 length = generate_config_rom(card, tmp_config_rom); 143 generate_config_rom(card, tmp_config_rom);
143 card->driver->set_config_rom(card, tmp_config_rom, length); 144 card->driver->set_config_rom(card, tmp_config_rom,
145 config_rom_length);
144 } 146 }
145} 147}
146 148
149static size_t required_space(struct fw_descriptor *desc)
150{
151 /* descriptor + entry into root dir + optional immediate entry */
152 return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
153}
154
147int fw_core_add_descriptor(struct fw_descriptor *desc) 155int fw_core_add_descriptor(struct fw_descriptor *desc)
148{ 156{
149 size_t i; 157 size_t i;
158 int ret;
150 159
151 /* 160 /*
152 * Check descriptor is valid; the length of all blocks in the 161 * Check descriptor is valid; the length of all blocks in the
@@ -162,15 +171,21 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
162 171
163 mutex_lock(&card_mutex); 172 mutex_lock(&card_mutex);
164 173
165 list_add_tail(&desc->link, &descriptor_list); 174 if (config_rom_length + required_space(desc) > 256) {
166 descriptor_count++; 175 ret = -EBUSY;
167 if (desc->immediate > 0) 176 } else {
177 list_add_tail(&desc->link, &descriptor_list);
178 config_rom_length += required_space(desc);
168 descriptor_count++; 179 descriptor_count++;
169 update_config_roms(); 180 if (desc->immediate > 0)
181 descriptor_count++;
182 update_config_roms();
183 ret = 0;
184 }
170 185
171 mutex_unlock(&card_mutex); 186 mutex_unlock(&card_mutex);
172 187
173 return 0; 188 return ret;
174} 189}
175EXPORT_SYMBOL(fw_core_add_descriptor); 190EXPORT_SYMBOL(fw_core_add_descriptor);
176 191
@@ -179,6 +194,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
179 mutex_lock(&card_mutex); 194 mutex_lock(&card_mutex);
180 195
181 list_del(&desc->link); 196 list_del(&desc->link);
197 config_rom_length -= required_space(desc);
182 descriptor_count--; 198 descriptor_count--;
183 if (desc->immediate > 0) 199 if (desc->immediate > 0)
184 descriptor_count--; 200 descriptor_count--;
@@ -428,7 +444,6 @@ EXPORT_SYMBOL(fw_card_initialize);
428int fw_card_add(struct fw_card *card, 444int fw_card_add(struct fw_card *card,
429 u32 max_receive, u32 link_speed, u64 guid) 445 u32 max_receive, u32 link_speed, u64 guid)
430{ 446{
431 size_t length;
432 int ret; 447 int ret;
433 448
434 card->max_receive = max_receive; 449 card->max_receive = max_receive;
@@ -437,8 +452,8 @@ int fw_card_add(struct fw_card *card,
437 452
438 mutex_lock(&card_mutex); 453 mutex_lock(&card_mutex);
439 454
440 length = generate_config_rom(card, tmp_config_rom); 455 generate_config_rom(card, tmp_config_rom);
441 ret = card->driver->enable(card, tmp_config_rom, length); 456 ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
442 if (ret == 0) 457 if (ret == 0)
443 list_add_tail(&card->link, &card_list); 458 list_add_tail(&card->link, &card_list);
444 459
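A standalone model of the new config-ROM space accounting in core-card.c: 7 quadlets are always present (ROM header, bus info block, root dir header, capabilities), each descriptor costs required_space(), and anything that would push the ROM past 256 quadlets is refused (reported as -EBUSY in the real code). Descriptor sizes below are made up for the demonstration:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct fw_descriptor {
	size_t length;		/* quadlets in the descriptor block */
	unsigned immediate;	/* non-zero: also add an immediate root-dir entry */
};

static size_t config_rom_length = 1 + 4 + 1 + 1;	/* the fixed 7 quadlets */

static size_t required_space(const struct fw_descriptor *desc)
{
	/* descriptor + entry into root dir + optional immediate entry */
	return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
}

static bool add_descriptor(const struct fw_descriptor *desc)
{
	if (config_rom_length + required_space(desc) > 256)
		return false;			/* the driver returns -EBUSY here */
	config_rom_length += required_space(desc);
	return true;
}

int main(void)
{
	struct fw_descriptor small = { .length = 5, .immediate = 1 };
	struct fw_descriptor huge  = { .length = 250, .immediate = 0 };

	printf("add small: %s, rom length now %zu\n",
	       add_descriptor(&small) ? "ok" : "-EBUSY", config_rom_length);
	printf("add huge:  %s, rom length now %zu\n",
	       add_descriptor(&huge) ? "ok" : "-EBUSY", config_rom_length);
	return 0;
}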
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 231e6ee5ba43..4eeaed57e219 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -35,6 +35,7 @@
35#include <linux/preempt.h> 35#include <linux/preempt.h>
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/string.h>
38#include <linux/time.h> 39#include <linux/time.h>
39#include <linux/uaccess.h> 40#include <linux/uaccess.h>
40#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
@@ -595,14 +596,22 @@ static int ioctl_send_request(struct client *client, void *buffer)
595 client->device->max_speed); 596 client->device->max_speed);
596} 597}
597 598
599static inline bool is_fcp_request(struct fw_request *request)
600{
601 return request == NULL;
602}
603
598static void release_request(struct client *client, 604static void release_request(struct client *client,
599 struct client_resource *resource) 605 struct client_resource *resource)
600{ 606{
601 struct inbound_transaction_resource *r = container_of(resource, 607 struct inbound_transaction_resource *r = container_of(resource,
602 struct inbound_transaction_resource, resource); 608 struct inbound_transaction_resource, resource);
603 609
604 fw_send_response(client->device->card, r->request, 610 if (is_fcp_request(r->request))
605 RCODE_CONFLICT_ERROR); 611 kfree(r->data);
612 else
613 fw_send_response(client->device->card, r->request,
614 RCODE_CONFLICT_ERROR);
606 kfree(r); 615 kfree(r);
607} 616}
608 617
@@ -615,6 +624,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
615 struct address_handler_resource *handler = callback_data; 624 struct address_handler_resource *handler = callback_data;
616 struct inbound_transaction_resource *r; 625 struct inbound_transaction_resource *r;
617 struct inbound_transaction_event *e; 626 struct inbound_transaction_event *e;
627 void *fcp_frame = NULL;
618 int ret; 628 int ret;
619 629
620 r = kmalloc(sizeof(*r), GFP_ATOMIC); 630 r = kmalloc(sizeof(*r), GFP_ATOMIC);
@@ -626,6 +636,18 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
626 r->data = payload; 636 r->data = payload;
627 r->length = length; 637 r->length = length;
628 638
639 if (is_fcp_request(request)) {
640 /*
641 * FIXME: Let core-transaction.c manage a
642 * single reference-counted copy?
643 */
644 fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
645 if (fcp_frame == NULL)
646 goto failed;
647
648 r->data = fcp_frame;
649 }
650
629 r->resource.release = release_request; 651 r->resource.release = release_request;
630 ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); 652 ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
631 if (ret < 0) 653 if (ret < 0)
@@ -639,13 +661,16 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
639 e->request.closure = handler->closure; 661 e->request.closure = handler->closure;
640 662
641 queue_event(handler->client, &e->event, 663 queue_event(handler->client, &e->event,
642 &e->request, sizeof(e->request), payload, length); 664 &e->request, sizeof(e->request), r->data, length);
643 return; 665 return;
644 666
645 failed: 667 failed:
646 kfree(r); 668 kfree(r);
647 kfree(e); 669 kfree(e);
648 fw_send_response(card, request, RCODE_CONFLICT_ERROR); 670 kfree(fcp_frame);
671
672 if (!is_fcp_request(request))
673 fw_send_response(card, request, RCODE_CONFLICT_ERROR);
649} 674}
650 675
651static void release_address_handler(struct client *client, 676static void release_address_handler(struct client *client,
@@ -715,14 +740,16 @@ static int ioctl_send_response(struct client *client, void *buffer)
715 740
716 r = container_of(resource, struct inbound_transaction_resource, 741 r = container_of(resource, struct inbound_transaction_resource,
717 resource); 742 resource);
743 if (is_fcp_request(r->request))
744 goto out;
745
718 if (request->length < r->length) 746 if (request->length < r->length)
719 r->length = request->length; 747 r->length = request->length;
720
721 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { 748 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
722 ret = -EFAULT; 749 ret = -EFAULT;
750 kfree(r->request);
723 goto out; 751 goto out;
724 } 752 }
725
726 fw_send_response(client->device->card, r->request, request->rcode); 753 fw_send_response(client->device->card, r->request, request->rcode);
727 out: 754 out:
728 kfree(r); 755 kfree(r);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 842739df23e2..495849eb13cc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -432,14 +432,20 @@ static struct fw_address_handler *lookup_overlapping_address_handler(
432 return NULL; 432 return NULL;
433} 433}
434 434
435static bool is_enclosing_handler(struct fw_address_handler *handler,
436 unsigned long long offset, size_t length)
437{
438 return handler->offset <= offset &&
439 offset + length <= handler->offset + handler->length;
440}
441
435static struct fw_address_handler *lookup_enclosing_address_handler( 442static struct fw_address_handler *lookup_enclosing_address_handler(
436 struct list_head *list, unsigned long long offset, size_t length) 443 struct list_head *list, unsigned long long offset, size_t length)
437{ 444{
438 struct fw_address_handler *handler; 445 struct fw_address_handler *handler;
439 446
440 list_for_each_entry(handler, list, link) { 447 list_for_each_entry(handler, list, link) {
441 if (handler->offset <= offset && 448 if (is_enclosing_handler(handler, offset, length))
442 offset + length <= handler->offset + handler->length)
443 return handler; 449 return handler;
444 } 450 }
445 451
@@ -465,6 +471,12 @@ const struct fw_address_region fw_unit_space_region =
465 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; 471 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
466#endif /* 0 */ 472#endif /* 0 */
467 473
474static bool is_in_fcp_region(u64 offset, size_t length)
475{
476 return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
477 offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
478}
479
468/** 480/**
469 * fw_core_add_address_handler - register for incoming requests 481 * fw_core_add_address_handler - register for incoming requests
470 * @handler: callback 482 * @handler: callback
@@ -477,8 +489,11 @@ const struct fw_address_region fw_unit_space_region =
477 * give the details of the particular request. 489 * give the details of the particular request.
478 * 490 *
479 * Return value: 0 on success, non-zero otherwise. 491 * Return value: 0 on success, non-zero otherwise.
492 *
480 * The start offset of the handler's address region is determined by 493 * The start offset of the handler's address region is determined by
481 * fw_core_add_address_handler() and is returned in handler->offset. 494 * fw_core_add_address_handler() and is returned in handler->offset.
495 *
496 * Address allocations are exclusive, except for the FCP registers.
482 */ 497 */
483int fw_core_add_address_handler(struct fw_address_handler *handler, 498int fw_core_add_address_handler(struct fw_address_handler *handler,
484 const struct fw_address_region *region) 499 const struct fw_address_region *region)
@@ -498,10 +513,12 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
498 513
499 handler->offset = region->start; 514 handler->offset = region->start;
500 while (handler->offset + handler->length <= region->end) { 515 while (handler->offset + handler->length <= region->end) {
501 other = 516 if (is_in_fcp_region(handler->offset, handler->length))
502 lookup_overlapping_address_handler(&address_handler_list, 517 other = NULL;
503 handler->offset, 518 else
504 handler->length); 519 other = lookup_overlapping_address_handler
520 (&address_handler_list,
521 handler->offset, handler->length);
505 if (other != NULL) { 522 if (other != NULL) {
506 handler->offset += other->length; 523 handler->offset += other->length;
507 } else { 524 } else {
@@ -668,6 +685,9 @@ static struct fw_request *allocate_request(struct fw_packet *p)
668void fw_send_response(struct fw_card *card, 685void fw_send_response(struct fw_card *card,
669 struct fw_request *request, int rcode) 686 struct fw_request *request, int rcode)
670{ 687{
688 if (WARN_ONCE(!request, "invalid for FCP address handlers"))
689 return;
690
671 /* unified transaction or broadcast transaction: don't respond */ 691 /* unified transaction or broadcast transaction: don't respond */
672 if (request->ack != ACK_PENDING || 692 if (request->ack != ACK_PENDING ||
673 HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { 693 HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
@@ -686,26 +706,15 @@ void fw_send_response(struct fw_card *card,
686} 706}
687EXPORT_SYMBOL(fw_send_response); 707EXPORT_SYMBOL(fw_send_response);
688 708
689void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) 709static void handle_exclusive_region_request(struct fw_card *card,
710 struct fw_packet *p,
711 struct fw_request *request,
712 unsigned long long offset)
690{ 713{
691 struct fw_address_handler *handler; 714 struct fw_address_handler *handler;
692 struct fw_request *request;
693 unsigned long long offset;
694 unsigned long flags; 715 unsigned long flags;
695 int tcode, destination, source; 716 int tcode, destination, source;
696 717
697 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
698 return;
699
700 request = allocate_request(p);
701 if (request == NULL) {
702 /* FIXME: send statically allocated busy packet. */
703 return;
704 }
705
706 offset =
707 ((unsigned long long)
708 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
709 tcode = HEADER_GET_TCODE(p->header[0]); 718 tcode = HEADER_GET_TCODE(p->header[0]);
710 destination = HEADER_GET_DESTINATION(p->header[0]); 719 destination = HEADER_GET_DESTINATION(p->header[0]);
711 source = HEADER_GET_SOURCE(p->header[1]); 720 source = HEADER_GET_SOURCE(p->header[1]);
@@ -732,6 +741,73 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
732 request->data, request->length, 741 request->data, request->length,
733 handler->callback_data); 742 handler->callback_data);
734} 743}
744
745static void handle_fcp_region_request(struct fw_card *card,
746 struct fw_packet *p,
747 struct fw_request *request,
748 unsigned long long offset)
749{
750 struct fw_address_handler *handler;
751 unsigned long flags;
752 int tcode, destination, source;
753
754 if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
755 offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
756 request->length > 0x200) {
757 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
758
759 return;
760 }
761
762 tcode = HEADER_GET_TCODE(p->header[0]);
763 destination = HEADER_GET_DESTINATION(p->header[0]);
764 source = HEADER_GET_SOURCE(p->header[1]);
765
766 if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
767 tcode != TCODE_WRITE_BLOCK_REQUEST) {
768 fw_send_response(card, request, RCODE_TYPE_ERROR);
769
770 return;
771 }
772
773 spin_lock_irqsave(&address_handler_lock, flags);
774 list_for_each_entry(handler, &address_handler_list, link) {
775 if (is_enclosing_handler(handler, offset, request->length))
776 handler->address_callback(card, NULL, tcode,
777 destination, source,
778 p->generation, p->speed,
779 offset, request->data,
780 request->length,
781 handler->callback_data);
782 }
783 spin_unlock_irqrestore(&address_handler_lock, flags);
784
785 fw_send_response(card, request, RCODE_COMPLETE);
786}
787
788void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
789{
790 struct fw_request *request;
791 unsigned long long offset;
792
793 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
794 return;
795
796 request = allocate_request(p);
797 if (request == NULL) {
798 /* FIXME: send statically allocated busy packet. */
799 return;
800 }
801
802 offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
803 p->header[2];
804
805 if (!is_in_fcp_region(offset, request->length))
806 handle_exclusive_region_request(card, p, request, offset);
807 else
808 handle_fcp_region_request(card, p, request, offset);
809
810}
735EXPORT_SYMBOL(fw_core_handle_request); 811EXPORT_SYMBOL(fw_core_handle_request);
736 812
737void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) 813void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
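A standalone version of the FCP-region test introduced above; the CSR offsets are the standard IEEE 1394 values the driver is assumed to use (FCP_COMMAND 0xB00, FCP_RESPONSE 0xD00, FCP_END 0xF00). Writes landing in this window are broadcast to every enclosing handler instead of taking the exclusive path:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define CSR_REGISTER_BASE	0xfffff0000000ULL
#define CSR_FCP_COMMAND		0xB00
#define CSR_FCP_RESPONSE	0xD00
#define CSR_FCP_END		0xF00

static bool is_in_fcp_region(uint64_t offset, size_t length)
{
	return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	       offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
}

int main(void)
{
	printf("FCP command write:  %d\n",
	       is_in_fcp_region(CSR_REGISTER_BASE | CSR_FCP_COMMAND, 8));
	printf("FCP response write: %d\n",
	       is_in_fcp_region(CSR_REGISTER_BASE | CSR_FCP_RESPONSE, 8));
	printf("Config ROM access:  %d\n",
	       is_in_fcp_region(CSR_REGISTER_BASE | 0x400, 4));
	return 0;
}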
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index cbaf420c36c5..2d3dc7ded0a9 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
893 893
894static struct kmem_cache *fwnet_packet_task_cache; 894static struct kmem_cache *fwnet_packet_task_cache;
895 895
896static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
897{
898 dev_kfree_skb_any(ptask->skb);
899 kmem_cache_free(fwnet_packet_task_cache, ptask);
900}
901
896static int fwnet_send_packet(struct fwnet_packet_task *ptask); 902static int fwnet_send_packet(struct fwnet_packet_task *ptask);
897 903
898static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) 904static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
899{ 905{
900 struct fwnet_device *dev; 906 struct fwnet_device *dev = ptask->dev;
901 unsigned long flags; 907 unsigned long flags;
902 908 bool free;
903 dev = ptask->dev;
904 909
905 spin_lock_irqsave(&dev->lock, flags); 910 spin_lock_irqsave(&dev->lock, flags);
906 list_del(&ptask->pt_link);
907 spin_unlock_irqrestore(&dev->lock, flags);
908 911
909 ptask->outstanding_pkts--; /* FIXME access inside lock */ 912 ptask->outstanding_pkts--;
913
914 /* Check whether we or the networking TX soft-IRQ is last user. */
915 free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
916
917 if (ptask->outstanding_pkts == 0)
918 list_del(&ptask->pt_link);
919
920 spin_unlock_irqrestore(&dev->lock, flags);
910 921
911 if (ptask->outstanding_pkts > 0) { 922 if (ptask->outstanding_pkts > 0) {
912 u16 dg_size; 923 u16 dg_size;
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
951 ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; 962 ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
952 } 963 }
953 fwnet_send_packet(ptask); 964 fwnet_send_packet(ptask);
954 } else {
955 dev_kfree_skb_any(ptask->skb);
956 kmem_cache_free(fwnet_packet_task_cache, ptask);
957 } 965 }
966
967 if (free)
968 fwnet_free_ptask(ptask);
958} 969}
959 970
960static void fwnet_write_complete(struct fw_card *card, int rcode, 971static void fwnet_write_complete(struct fw_card *card, int rcode,
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
977 unsigned tx_len; 988 unsigned tx_len;
978 struct rfc2734_header *bufhdr; 989 struct rfc2734_header *bufhdr;
979 unsigned long flags; 990 unsigned long flags;
991 bool free;
980 992
981 dev = ptask->dev; 993 dev = ptask->dev;
982 tx_len = ptask->max_payload; 994 tx_len = ptask->max_payload;
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
1022 generation, SCODE_100, 0ULL, ptask->skb->data, 1034 generation, SCODE_100, 0ULL, ptask->skb->data,
1023 tx_len + 8, fwnet_write_complete, ptask); 1035 tx_len + 8, fwnet_write_complete, ptask);
1024 1036
1025 /* FIXME race? */
1026 spin_lock_irqsave(&dev->lock, flags); 1037 spin_lock_irqsave(&dev->lock, flags);
1027 list_add_tail(&ptask->pt_link, &dev->broadcasted_list); 1038
1039 /* If the AT tasklet already ran, we may be last user. */
1040 free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
1041 if (!free)
1042 list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
1043
1028 spin_unlock_irqrestore(&dev->lock, flags); 1044 spin_unlock_irqrestore(&dev->lock, flags);
1029 1045
1030 return 0; 1046 goto out;
1031 } 1047 }
1032 1048
1033 fw_send_request(dev->card, &ptask->transaction, 1049 fw_send_request(dev->card, &ptask->transaction,
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
1035 ptask->generation, ptask->speed, ptask->fifo_addr, 1051 ptask->generation, ptask->speed, ptask->fifo_addr,
1036 ptask->skb->data, tx_len, fwnet_write_complete, ptask); 1052 ptask->skb->data, tx_len, fwnet_write_complete, ptask);
1037 1053
1038 /* FIXME race? */
1039 spin_lock_irqsave(&dev->lock, flags); 1054 spin_lock_irqsave(&dev->lock, flags);
1040 list_add_tail(&ptask->pt_link, &dev->sent_list); 1055
1056 /* If the AT tasklet already ran, we may be last user. */
1057 free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
1058 if (!free)
1059 list_add_tail(&ptask->pt_link, &dev->sent_list);
1060
1041 spin_unlock_irqrestore(&dev->lock, flags); 1061 spin_unlock_irqrestore(&dev->lock, flags);
1042 1062
1043 dev->netdev->trans_start = jiffies; 1063 dev->netdev->trans_start = jiffies;
1064 out:
1065 if (free)
1066 fwnet_free_ptask(ptask);
1044 1067
1045 return 0; 1068 return 0;
1046} 1069}
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1298 spin_unlock_irqrestore(&dev->lock, flags); 1321 spin_unlock_irqrestore(&dev->lock, flags);
1299 1322
1300 ptask->max_payload = max_payload; 1323 ptask->max_payload = max_payload;
1324 INIT_LIST_HEAD(&ptask->pt_link);
1325
1301 fwnet_send_packet(ptask); 1326 fwnet_send_packet(ptask);
1302 1327
1303 return NETDEV_TX_OK; 1328 return NETDEV_TX_OK;
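Annotation: the net.c hunks above replace the racy "FIXME access inside lock" decrement with a hand-off scheme: both the transmit-completion path and the sender compute, while holding dev->lock, whether they are the last user of the packet task, and whoever is last frees it after dropping the lock (fwnet_free_ptask). The userspace sketch below only illustrates that hand-off under simplifying assumptions (a single fragment, no retransmission of further fragments); it is not the driver code.

/*
 * Illustrative sketch only (not the driver code): the last of the two
 * paths -- transmit completion vs. the sender -- frees the task.  The
 * decision is taken while "locked"; the free happens after unlocking.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct ptask {
	int outstanding_pkts;	/* fragments still in flight */
	bool on_list;		/* models !list_empty(&ptask->pt_link) */
};

/* models fwnet_transmit_packet_done(): runs when one fragment completes */
static void complete_path(struct ptask *t)
{
	bool free_it;

	/* --- lock held --- */
	t->outstanding_pkts--;
	/* we are the last user only if the sender already queued the task */
	free_it = (t->outstanding_pkts == 0 && t->on_list);
	if (t->outstanding_pkts == 0)
		t->on_list = false;	/* models list_del() */
	/* --- lock dropped --- */

	if (free_it) {
		printf("completion path frees the task\n");
		free(t);
	}
}

/* models the tail of fwnet_send_packet(): runs after fw_send_request() */
static void send_path(struct ptask *t)
{
	bool free_it;

	/* --- lock held --- */
	/* if the completion already ran for the last fragment, we are last */
	free_it = (t->outstanding_pkts == 0 && !t->on_list);
	if (!free_it)
		t->on_list = true;	/* models list_add_tail() */
	/* --- lock dropped --- */

	if (free_it) {
		printf("send path frees the task\n");
		free(t);
	}
}

int main(void)
{
	/* case 1: completion fires before the sender returns (single fragment) */
	struct ptask *t = calloc(1, sizeof(*t));
	t->outstanding_pkts = 1;
	complete_path(t);	/* not last user: task not on the list yet */
	send_path(t);		/* last user: frees */

	/* case 2: the usual order -- sender queues first, completion frees */
	t = calloc(1, sizeof(*t));
	t->outstanding_pkts = 1;
	send_path(t);
	complete_path(t);
	return 0;
}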
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 96768e160866..43ebf337b131 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2101 u32 payload_index, payload_end_index, next_page_index; 2101 u32 payload_index, payload_end_index, next_page_index;
2102 int page, end_page, i, length, offset; 2102 int page, end_page, i, length, offset;
2103 2103
2104 /*
2105 * FIXME: Cycle lost behavior should be configurable: lose
2106 * packet, retransmit or terminate..
2107 */
2108
2109 p = packet; 2104 p = packet;
2110 payload_index = payload; 2105 payload_index = payload;
2111 2106
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2135 if (!p->skip) { 2130 if (!p->skip) {
2136 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 2131 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
2137 d[0].req_count = cpu_to_le16(8); 2132 d[0].req_count = cpu_to_le16(8);
2133 /*
2134 * Link the skip address to this descriptor itself. This causes
2135 * a context to skip a cycle whenever lost cycles or FIFO
2136 * overruns occur, without dropping the data. The application
2137 * should then decide whether this is an error condition or not.
2138 * FIXME: Make the context's cycle-lost behaviour configurable?
2139 */
2140 d[0].branch_address = cpu_to_le32(d_bus | z);
2138 2141
2139 header = (__le32 *) &d[1]; 2142 header = (__le32 *) &d[1];
2140 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | 2143 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
@@ -2226,7 +2229,6 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2226 if (rest == 0) 2229 if (rest == 0)
2227 return -EINVAL; 2230 return -EINVAL;
2228 2231
2229 /* FIXME: make packet-per-buffer/dual-buffer a context option */
2230 while (rest > 0) { 2232 while (rest > 0) {
2231 d = context_get_descriptors(&ctx->context, 2233 d = context_get_descriptors(&ctx->context,
2232 z + header_z, &d_bus); 2234 z + header_z, &d_bus);
@@ -2421,6 +2423,7 @@ static void ohci_pmac_off(struct pci_dev *dev)
2421 2423
2422#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT 2424#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
2423#define PCI_DEVICE_ID_AGERE_FW643 0x5901 2425#define PCI_DEVICE_ID_AGERE_FW643 0x5901
2426#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
2424 2427
2425static int __devinit pci_probe(struct pci_dev *dev, 2428static int __devinit pci_probe(struct pci_dev *dev,
2426 const struct pci_device_id *ent) 2429 const struct pci_device_id *ent)
@@ -2470,7 +2473,10 @@ static int __devinit pci_probe(struct pci_dev *dev,
2470 } 2473 }
2471 2474
2472 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2475 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2476#if 0
2477 /* FIXME: make it a context option or remove dual-buffer mode */
2473 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; 2478 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
2479#endif
2474 2480
2475 /* dual-buffer mode is broken if more than one IR context is active */ 2481 /* dual-buffer mode is broken if more than one IR context is active */
2476 if (dev->vendor == PCI_VENDOR_ID_AGERE && 2482 if (dev->vendor == PCI_VENDOR_ID_AGERE &&
@@ -2486,7 +2492,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
2486#if !defined(CONFIG_X86_32) 2492#if !defined(CONFIG_X86_32)
2487 /* dual-buffer mode is broken with descriptor addresses above 2G */ 2493 /* dual-buffer mode is broken with descriptor addresses above 2G */
2488 if (dev->vendor == PCI_VENDOR_ID_TI && 2494 if (dev->vendor == PCI_VENDOR_ID_TI &&
2489 dev->device == PCI_DEVICE_ID_TI_TSB43AB22) 2495 (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
2496 dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
2490 ohci->use_dualbuffer = false; 2497 ohci->use_dualbuffer = false;
2491#endif 2498#endif
2492 2499
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index a019b49ecc9b..1f1d88ae68d6 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -172,6 +172,15 @@ config GPIO_ADP5520
172 To compile this driver as a module, choose M here: the module will 172 To compile this driver as a module, choose M here: the module will
173 be called adp5520-gpio. 173 be called adp5520-gpio.
174 174
175config GPIO_ADP5588
176 tristate "ADP5588 I2C GPIO expander"
177 depends on I2C
178 help
179 This option enables support for 18 GPIOs found
180 on Analog Devices ADP5588 GPIO Expanders.
181 To compile this driver as a module, choose M here: the module will be
182 called adp5588-gpio.
183
175comment "PCI GPIO expanders:" 184comment "PCI GPIO expanders:"
176 185
177config GPIO_CS5535 186config GPIO_CS5535
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 52fe4cf734c7..48687238edb1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -5,6 +5,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
5obj-$(CONFIG_GPIOLIB) += gpiolib.o 5obj-$(CONFIG_GPIOLIB) += gpiolib.o
6 6
7obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o 7obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
8obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
8obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o 9obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
9obj-$(CONFIG_GPIO_MAX7301) += max7301.o 10obj-$(CONFIG_GPIO_MAX7301) += max7301.o
10obj-$(CONFIG_GPIO_MAX732X) += max732x.o 11obj-$(CONFIG_GPIO_MAX732X) += max732x.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
new file mode 100644
index 000000000000..afc097a16b33
--- /dev/null
+++ b/drivers/gpio/adp5588-gpio.c
@@ -0,0 +1,266 @@
1/*
2 * GPIO Chip driver for Analog Devices
3 * ADP5588 I/O Expander and QWERTY Keypad Controller
4 *
5 * Copyright 2009 Analog Devices Inc.
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/i2c.h>
14#include <linux/gpio.h>
15
16#include <linux/i2c/adp5588.h>
17
18#define DRV_NAME "adp5588-gpio"
19#define MAXGPIO 18
20#define ADP_BANK(offs) ((offs) >> 3)
21#define ADP_BIT(offs) (1u << ((offs) & 0x7))
22
23struct adp5588_gpio {
24 struct i2c_client *client;
25 struct gpio_chip gpio_chip;
26 struct mutex lock; /* protect cached dir, dat_out */
27 unsigned gpio_start;
28 uint8_t dat_out[3];
29 uint8_t dir[3];
30};
31
32static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
33{
34 int ret = i2c_smbus_read_byte_data(client, reg);
35
36 if (ret < 0)
37 dev_err(&client->dev, "Read Error\n");
38
39 return ret;
40}
41
42static int adp5588_gpio_write(struct i2c_client *client, u8 reg, u8 val)
43{
44 int ret = i2c_smbus_write_byte_data(client, reg, val);
45
46 if (ret < 0)
47 dev_err(&client->dev, "Write Error\n");
48
49 return ret;
50}
51
52static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
53{
54 struct adp5588_gpio *dev =
55 container_of(chip, struct adp5588_gpio, gpio_chip);
56
57 return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off))
58 & ADP_BIT(off));
59}
60
61static void adp5588_gpio_set_value(struct gpio_chip *chip,
62 unsigned off, int val)
63{
64 unsigned bank, bit;
65 struct adp5588_gpio *dev =
66 container_of(chip, struct adp5588_gpio, gpio_chip);
67
68 bank = ADP_BANK(off);
69 bit = ADP_BIT(off);
70
71 mutex_lock(&dev->lock);
72 if (val)
73 dev->dat_out[bank] |= bit;
74 else
75 dev->dat_out[bank] &= ~bit;
76
77 adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
78 dev->dat_out[bank]);
79 mutex_unlock(&dev->lock);
80}
81
82static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
83{
84 int ret;
85 unsigned bank;
86 struct adp5588_gpio *dev =
87 container_of(chip, struct adp5588_gpio, gpio_chip);
88
89 bank = ADP_BANK(off);
90
91 mutex_lock(&dev->lock);
92 dev->dir[bank] &= ~ADP_BIT(off);
93 ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
94 mutex_unlock(&dev->lock);
95
96 return ret;
97}
98
99static int adp5588_gpio_direction_output(struct gpio_chip *chip,
100 unsigned off, int val)
101{
102 int ret;
103 unsigned bank, bit;
104 struct adp5588_gpio *dev =
105 container_of(chip, struct adp5588_gpio, gpio_chip);
106
107 bank = ADP_BANK(off);
108 bit = ADP_BIT(off);
109
110 mutex_lock(&dev->lock);
111 dev->dir[bank] |= bit;
112
113 if (val)
114 dev->dat_out[bank] |= bit;
115 else
116 dev->dat_out[bank] &= ~bit;
117
118 ret = adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
119 dev->dat_out[bank]);
120 ret |= adp5588_gpio_write(dev->client, GPIO_DIR1 + bank,
121 dev->dir[bank]);
122 mutex_unlock(&dev->lock);
123
124 return ret;
125}
126
127static int __devinit adp5588_gpio_probe(struct i2c_client *client,
128 const struct i2c_device_id *id)
129{
130 struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
131 struct adp5588_gpio *dev;
132 struct gpio_chip *gc;
133 int ret, i, revid;
134
135 if (pdata == NULL) {
136 dev_err(&client->dev, "missing platform data\n");
137 return -ENODEV;
138 }
139
140 if (!i2c_check_functionality(client->adapter,
141 I2C_FUNC_SMBUS_BYTE_DATA)) {
142 dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
143 return -EIO;
144 }
145
146 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
147 if (dev == NULL) {
148 dev_err(&client->dev, "failed to alloc memory\n");
149 return -ENOMEM;
150 }
151
152 dev->client = client;
153
154 gc = &dev->gpio_chip;
155 gc->direction_input = adp5588_gpio_direction_input;
156 gc->direction_output = adp5588_gpio_direction_output;
157 gc->get = adp5588_gpio_get_value;
158 gc->set = adp5588_gpio_set_value;
159 gc->can_sleep = 1;
160
161 gc->base = pdata->gpio_start;
162 gc->ngpio = MAXGPIO;
163 gc->label = client->name;
164 gc->owner = THIS_MODULE;
165
166 mutex_init(&dev->lock);
167
168
169 ret = adp5588_gpio_read(dev->client, DEV_ID);
170 if (ret < 0)
171 goto err;
172
173 revid = ret & ADP5588_DEVICE_ID_MASK;
174
175 for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) {
176 dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
177 dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
178 ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
179 ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
180 (pdata->pullup_dis_mask >> (8 * i)) & 0xFF);
181
182 if (ret)
183 goto err;
184 }
185
186 ret = gpiochip_add(&dev->gpio_chip);
187 if (ret)
188 goto err;
189
190 dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n",
191 gc->base, gc->base + gc->ngpio - 1,
192 client->name, revid);
193
194 if (pdata->setup) {
195 ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
196 if (ret < 0)
197 dev_warn(&client->dev, "setup failed, %d\n", ret);
198 }
199
200 i2c_set_clientdata(client, dev);
201 return 0;
202
203err:
204 kfree(dev);
205 return ret;
206}
207
208static int __devexit adp5588_gpio_remove(struct i2c_client *client)
209{
210 struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
211 struct adp5588_gpio *dev = i2c_get_clientdata(client);
212 int ret;
213
214 if (pdata->teardown) {
215 ret = pdata->teardown(client,
216 dev->gpio_chip.base, dev->gpio_chip.ngpio,
217 pdata->context);
218 if (ret < 0) {
219 dev_err(&client->dev, "teardown failed %d\n", ret);
220 return ret;
221 }
222 }
223
224 ret = gpiochip_remove(&dev->gpio_chip);
225 if (ret) {
226 dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
227 return ret;
228 }
229
230 kfree(dev);
231 return 0;
232}
233
234static const struct i2c_device_id adp5588_gpio_id[] = {
235 {DRV_NAME, 0},
236 {}
237};
238
239MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
240
241static struct i2c_driver adp5588_gpio_driver = {
242 .driver = {
243 .name = DRV_NAME,
244 },
245 .probe = adp5588_gpio_probe,
246 .remove = __devexit_p(adp5588_gpio_remove),
247 .id_table = adp5588_gpio_id,
248};
249
250static int __init adp5588_gpio_init(void)
251{
252 return i2c_add_driver(&adp5588_gpio_driver);
253}
254
255module_init(adp5588_gpio_init);
256
257static void __exit adp5588_gpio_exit(void)
258{
259 i2c_del_driver(&adp5588_gpio_driver);
260}
261
262module_exit(adp5588_gpio_exit);
263
264MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
265MODULE_DESCRIPTION("GPIO ADP5588 Driver");
266MODULE_LICENSE("GPL");
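Annotation: the new adp5588-gpio driver caches dat_out[3] and dir[3] and addresses its 18 GPIOs through ADP_BANK()/ADP_BIT(), i.e. three 8-bit register banks with a bit mask inside each. A stand-alone illustration of that mapping, using the same macros compiled as ordinary userspace C:

/* Sketch: how ADP_BANK()/ADP_BIT() map a GPIO offset (0..17) onto one of
 * three 8-bit registers and a bit within it. */
#include <stdio.h>

#define MAXGPIO		18
#define ADP_BANK(offs)	((offs) >> 3)		/* register index: 0, 1 or 2 */
#define ADP_BIT(offs)	(1u << ((offs) & 0x7))	/* bit inside that register */

int main(void)
{
	unsigned off;

	for (off = 0; off < MAXGPIO; off++)
		printf("gpio %2u -> bank %u, mask 0x%02x\n",
		       off, ADP_BANK(off), ADP_BIT(off));
	/* e.g. gpio 10 -> bank 1, mask 0x04: bit 2 of the second data register */
	return 0;
}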
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a25ad284a272..350842ad3632 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -858,8 +858,6 @@ int gpio_sysfs_set_active_low(unsigned gpio, int value)
858 desc = &gpio_desc[gpio]; 858 desc = &gpio_desc[gpio];
859 859
860 if (test_bit(FLAG_EXPORT, &desc->flags)) { 860 if (test_bit(FLAG_EXPORT, &desc->flags)) {
861 struct device *dev;
862
863 dev = class_find_device(&gpio_class, NULL, desc, match_export); 861 dev = class_find_device(&gpio_class, NULL, desc, match_export);
864 if (dev == NULL) { 862 if (dev == NULL) {
865 status = -ENODEV; 863 status = -ENODEV;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd17e050..305c59003963 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -66,6 +66,8 @@ config DRM_RADEON
66 66
67 If M is selected, the module will be called radeon. 67 If M is selected, the module will be called radeon.
68 68
69source "drivers/gpu/drm/radeon/Kconfig"
70
69config DRM_I810 71config DRM_I810
70 tristate "Intel I810" 72 tristate "Intel I810"
71 depends on DRM && AGP && AGP_INTEL 73 depends on DRM && AGP && AGP_INTEL
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 628eae3e9b83..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
39 struct drm_ati_pcigart_info *gart_info) 39 struct drm_ati_pcigart_info *gart_info)
40{ 40{
41 gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, 41 gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
42 PAGE_SIZE, 42 PAGE_SIZE);
43 gart_info->table_mask);
44 if (gart_info->table_handle == NULL) 43 if (gart_info->table_handle == NULL)
45 return -ENOMEM; 44 return -ENOMEM;
46 45
@@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
112 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { 111 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
113 DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); 112 DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
114 113
114 if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
115 DRM_ERROR("fail to set dma mask to 0x%Lx\n",
116 (unsigned long long)gart_info->table_mask);
117 ret = 1;
118 goto done;
119 }
120
115 ret = drm_ati_alloc_pcigart_table(dev, gart_info); 121 ret = drm_ati_alloc_pcigart_table(dev, gart_info);
116 if (ret) { 122 if (ret) {
117 DRM_ERROR("cannot allocate PCI GART page!\n"); 123 DRM_ERROR("cannot allocate PCI GART page!\n");
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3d09e304f6f4..8417cc4c43f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
326 * As we're limiting the address to 2^32-1 (or less), 326 * As we're limiting the address to 2^32-1 (or less),
327 * casting it down to 32 bits is no problem, but we 327 * casting it down to 32 bits is no problem, but we
328 * need to point to a 64bit variable first. */ 328 * need to point to a 64bit variable first. */
329 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); 329 dmah = drm_pci_alloc(dev, map->size, map->size);
330 if (!dmah) { 330 if (!dmah) {
331 kfree(map); 331 kfree(map);
332 return -ENOMEM; 332 return -ENOMEM;
@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
885 885
886 while (entry->buf_count < count) { 886 while (entry->buf_count < count) {
887 887
888 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); 888 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
889 889
890 if (!dmah) { 890 if (!dmah) {
891 /* Set count correctly so we free the proper amount. */ 891 /* Set count correctly so we free the proper amount. */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5124401f266a..d91fb8c0b7b3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
158 { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, 158 { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
159 { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, 159 { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
160 { DRM_MODE_CONNECTOR_TV, "TV", 0 }, 160 { DRM_MODE_CONNECTOR_TV, "TV", 0 },
161 { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
161}; 162};
162 163
163static struct drm_prop_enum_list drm_encoder_enum_list[] = 164static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4231d6db72ec..7d0f00a935fa 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
216EXPORT_SYMBOL(drm_helper_crtc_in_use); 216EXPORT_SYMBOL(drm_helper_crtc_in_use);
217 217
218/** 218/**
219 * drm_disable_unused_functions - disable unused objects 219 * drm_helper_disable_unused_functions - disable unused objects
220 * @dev: DRM device 220 * @dev: DRM device
221 * 221 *
222 * LOCKING: 222 * LOCKING:
@@ -702,7 +702,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
702 if (encoder->crtc != crtc) 702 if (encoder->crtc != crtc)
703 continue; 703 continue;
704 704
705 DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder), 705 DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
706 mode->name, mode->base.id); 706 mode->name, mode->base.id);
707 encoder_funcs = encoder->helper_private; 707 encoder_funcs = encoder->helper_private;
708 encoder_funcs->mode_set(encoder, mode, adjusted_mode); 708 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@@ -1032,7 +1032,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
1032 /* 1032 /*
1033 * we shouldn't end up with no modes here. 1033 * we shouldn't end up with no modes here.
1034 */ 1034 */
1035 WARN(!count, "No connectors reported connected with modes\n"); 1035 if (count == 0)
1036 printk(KERN_INFO "No connectors reported connected with modes\n");
1036 1037
1037 drm_setup_crtcs(dev); 1038 drm_setup_crtcs(dev);
1038 1039
@@ -1162,6 +1163,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
1162int drm_helper_resume_force_mode(struct drm_device *dev) 1163int drm_helper_resume_force_mode(struct drm_device *dev)
1163{ 1164{
1164 struct drm_crtc *crtc; 1165 struct drm_crtc *crtc;
1166 struct drm_encoder *encoder;
1167 struct drm_encoder_helper_funcs *encoder_funcs;
1168 struct drm_crtc_helper_funcs *crtc_funcs;
1165 int ret; 1169 int ret;
1166 1170
1167 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1171 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1174,6 +1178,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
1174 1178
1175 if (ret == false) 1179 if (ret == false)
1176 DRM_ERROR("failed to set mode on crtc %p\n", crtc); 1180 DRM_ERROR("failed to set mode on crtc %p\n", crtc);
1181
1182 /* Turn off outputs that were already powered off */
1183 if (drm_helper_choose_crtc_dpms(crtc)) {
1184 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1185
1186 if(encoder->crtc != crtc)
1187 continue;
1188
1189 encoder_funcs = encoder->helper_private;
1190 if (encoder_funcs->dpms)
1191 (*encoder_funcs->dpms) (encoder,
1192 drm_helper_choose_encoder_dpms(encoder));
1193
1194 crtc_funcs = crtc->helper_private;
1195 if (crtc_funcs->dpms)
1196 (*crtc_funcs->dpms) (crtc,
1197 drm_helper_choose_crtc_dpms(crtc));
1198 }
1199 }
1177 } 1200 }
1178 /* disable the unused connectors while restoring the modesetting */ 1201 /* disable the unused connectors while restoring the modesetting */
1179 drm_helper_disable_unused_functions(dev); 1202 drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5c9f79877cbf..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
598 return mode; 598 return mode;
599} 599}
600 600
601/*
602 * EDID is delightfully ambiguous about how interlaced modes are to be
603 * encoded. Our internal representation is of frame height, but some
604 * HDTV detailed timings are encoded as field height.
605 *
606 * The format list here is from CEA, in frame size. Technically we
607 * should be checking refresh rate too. Whatever.
608 */
609static void
610drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
611 struct detailed_pixel_timing *pt)
612{
613 int i;
614 static const struct {
615 int w, h;
616 } cea_interlaced[] = {
617 { 1920, 1080 },
618 { 720, 480 },
619 { 1440, 480 },
620 { 2880, 480 },
621 { 720, 576 },
622 { 1440, 576 },
623 { 2880, 576 },
624 };
625 static const int n_sizes =
626 sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
627
628 if (!(pt->misc & DRM_EDID_PT_INTERLACED))
629 return;
630
631 for (i = 0; i < n_sizes; i++) {
632 if ((mode->hdisplay == cea_interlaced[i].w) &&
633 (mode->vdisplay == cea_interlaced[i].h / 2)) {
634 mode->vdisplay *= 2;
635 mode->vsync_start *= 2;
636 mode->vsync_end *= 2;
637 mode->vtotal *= 2;
638 mode->vtotal |= 1;
639 }
640 }
641
642 mode->flags |= DRM_MODE_FLAG_INTERLACE;
643}
644
601/** 645/**
602 * drm_mode_detailed - create a new mode from an EDID detailed timing section 646 * drm_mode_detailed - create a new mode from an EDID detailed timing section
603 * @dev: DRM device (needed to create new mode) 647 * @dev: DRM device (needed to create new mode)
@@ -633,8 +677,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
633 return NULL; 677 return NULL;
634 } 678 }
635 if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { 679 if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
636 printk(KERN_WARNING "integrated sync not supported\n"); 680 printk(KERN_WARNING "composite sync not supported\n");
637 return NULL;
638 } 681 }
639 682
640 /* it is incorrect if hsync/vsync width is zero */ 683 /* it is incorrect if hsync/vsync width is zero */
@@ -681,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
681 724
682 drm_mode_set_name(mode); 725 drm_mode_set_name(mode);
683 726
684 if (pt->misc & DRM_EDID_PT_INTERLACED) 727 drm_mode_do_interlace_quirk(mode, pt);
685 mode->flags |= DRM_MODE_FLAG_INTERLACE;
686 728
687 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { 729 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
688 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; 730 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
@@ -911,23 +953,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
911 struct drm_device *dev = connector->dev; 953 struct drm_device *dev = connector->dev;
912 struct cvt_timing *cvt; 954 struct cvt_timing *cvt;
913 const int rates[] = { 60, 85, 75, 60, 50 }; 955 const int rates[] = { 60, 85, 75, 60, 50 };
956 const u8 empty[3] = { 0, 0, 0 };
914 957
915 for (i = 0; i < 4; i++) { 958 for (i = 0; i < 4; i++) {
916 int uninitialized_var(width), height; 959 int uninitialized_var(width), height;
917 cvt = &(timing->data.other_data.data.cvt[i]); 960 cvt = &(timing->data.other_data.data.cvt[i]);
918 961
919 height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; 962 if (!memcmp(cvt->code, empty, 3))
920 switch (cvt->code[1] & 0xc0) { 963 continue;
964
965 height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
966 switch (cvt->code[1] & 0x0c) {
921 case 0x00: 967 case 0x00:
922 width = height * 4 / 3; 968 width = height * 4 / 3;
923 break; 969 break;
924 case 0x40: 970 case 0x04:
925 width = height * 16 / 9; 971 width = height * 16 / 9;
926 break; 972 break;
927 case 0x80: 973 case 0x08:
928 width = height * 16 / 10; 974 width = height * 16 / 10;
929 break; 975 break;
930 case 0xc0: 976 case 0x0c:
931 width = height * 15 / 9; 977 width = height * 15 / 9;
932 break; 978 break;
933 } 979 }
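Annotation: the drm_cvt_modes() hunk fixes two decoding bugs in the 3-byte CVT descriptors: the four high bits of the vertical size sit in the upper nibble of code[1] and must be shifted left by 4 (not 8), and the aspect-ratio field is bits 3:2 (mask 0x0c), not bits 7:6. A small sketch of the corrected decode, fed a hypothetical descriptor for 1080 lines at 16:9:

/* Sketch: decoding one 3-byte CVT descriptor the way the corrected
 * drm_cvt_modes() does. */
#include <stdio.h>
#include <stdint.h>

static void decode_cvt(const uint8_t code[3])
{
	int width = 0, height;

	height = (code[0] + ((code[1] & 0xf0) << 4) + 1) * 2;
	switch (code[1] & 0x0c) {
	case 0x00: width = height * 4 / 3;   break;
	case 0x04: width = height * 16 / 9;  break;
	case 0x08: width = height * 16 / 10; break;
	case 0x0c: width = height * 15 / 9;  break;
	}
	printf("%dx%d\n", width, height);
}

int main(void)
{
	/* hypothetical descriptor: 1080/2 - 1 = 539 = 0x21b, aspect code 01 (16:9) */
	const uint8_t code[3] = { 0x1b, 0x24, 0x00 };
	decode_cvt(code);	/* prints 1920x1080 */
	return 0;
}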
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b49fa055f4f..0f9e90552dc4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
156 force = DRM_FORCE_ON; 156 force = DRM_FORCE_ON;
157 break; 157 break;
158 case 'D': 158 case 'D':
159 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) || 159 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
160 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) 160 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
161 force = DRM_FORCE_ON; 161 force = DRM_FORCE_ON;
162 else 162 else
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
389 break; 389 break;
390 /* Display: Off; HSync: On, VSync: On */ 390 /* Display: Off; HSync: On, VSync: On */
391 case FB_BLANK_NORMAL: 391 case FB_BLANK_NORMAL:
392 drm_fb_helper_off(info, DRM_MODE_DPMS_ON); 392 drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
393 break; 393 break;
394 /* Display: Off; HSync: Off, VSync: On */ 394 /* Display: Off; HSync: Off, VSync: On */
395 case FB_BLANK_HSYNC_SUSPEND: 395 case FB_BLANK_HSYNC_SUSPEND:
@@ -606,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
606 return -EINVAL; 606 return -EINVAL;
607 607
608 /* Need to resize the fb object !!! */ 608 /* Need to resize the fb object !!! */
609 if (var->xres > fb->width || var->yres > fb->height) { 609 if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
610 DRM_ERROR("Requested width/height is greater than current fb " 610 DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
611 "object %dx%d > %dx%d\n", var->xres, var->yres, 611 "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
612 fb->width, fb->height); 612 fb->width, fb->height, fb->bits_per_pixel);
613 DRM_ERROR("Need resizing code.\n");
614 return -EINVAL; 613 return -EINVAL;
615 } 614 }
616 615
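Annotation: drm_fb_helper_check_var() previously rejected only requests wider or taller than the backing fb object; the hunk adds bits_per_pixel to the test and demotes the message to a debug print. A sketch of the resulting predicate (illustrative only, hypothetical struct and helper names):

#include <stdbool.h>
#include <stdio.h>

struct fb { int width, height, bits_per_pixel; };

/* the corrected test: reject any request larger than the existing fb object
 * in width, height, or depth */
static bool var_fits(const struct fb *fb, int xres, int yres, int bpp)
{
	return bpp <= fb->bits_per_pixel &&
	       xres <= fb->width &&
	       yres <= fb->height;
}

int main(void)
{
	struct fb fb = { 1920, 1080, 32 };

	printf("%d\n", var_fits(&fb, 1280, 720, 32));	/* 1 */
	printf("%d\n", var_fits(&fb, 1920, 1080, 64));	/* 0: deeper than the fb */
	return 0;
}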
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..8bf3770f294e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
142 if (IS_ERR(obj->filp)) 142 if (IS_ERR(obj->filp))
143 goto free; 143 goto free;
144 144
145 /* Basically we want to disable the OOM killer and handle ENOMEM
146 * ourselves by sacrificing pages from cached buffers.
147 * XXX shmem_file_[gs]et_gfp_mask()
148 */
149 mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
150 GFP_HIGHUSER |
151 __GFP_COLD |
152 __GFP_FS |
153 __GFP_RECLAIMABLE |
154 __GFP_NORETRY |
155 __GFP_NOWARN |
156 __GFP_NOMEMALLOC);
157
158 kref_init(&obj->refcount); 145 kref_init(&obj->refcount);
159 kref_init(&obj->handlecount); 146 kref_init(&obj->handlecount);
160 obj->size = size; 147 obj->size = size;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 7998ee66b317..b98384dbd9a7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
115 115
116 dev->num_crtcs = 0; 116 dev->num_crtcs = 0;
117} 117}
118EXPORT_SYMBOL(drm_vblank_cleanup);
118 119
119int drm_vblank_init(struct drm_device *dev, int num_crtcs) 120int drm_vblank_init(struct drm_device *dev, int num_crtcs)
120{ 121{
@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
163 } 164 }
164 165
165 dev->vblank_disable_allowed = 0; 166 dev->vblank_disable_allowed = 0;
166
167 return 0; 167 return 0;
168 168
169err: 169err:
@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
493 */ 493 */
494void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 494void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
495{ 495{
496 /* vblank is not initialized (IRQ not installed ?) */
497 if (!dev->num_crtcs)
498 return;
496 /* 499 /*
497 * To avoid all the problems that might happen if interrupts 500 * To avoid all the problems that might happen if interrupts
498 * were enabled/disabled around or between these calls, we just 501 * were enabled/disabled around or between these calls, we just
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
405 wasted += alignment - tmp; 405 wasted += alignment - tmp;
406 } 406 }
407 407
408 if (entry->size >= size + wasted) { 408 if (entry->size >= size + wasted &&
409 (entry->start + wasted + size) <= end) {
409 if (!best_match) 410 if (!best_match)
410 return entry; 411 return entry;
411 if (entry->size < best_size) { 412 if (entry->size < best_size) {
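Annotation: drm_mm_search_free_in_range() used to accept any hole large enough for size plus alignment waste, even if the resulting block would spill past the requested end of the range; the hunk adds the missing upper-bound check. A userspace sketch of the corrected test, under the simplifying assumption that "wasted" only models alignment padding at the start of the hole:

/* Sketch of the corrected in-range test: a hole qualifies only if the
 * aligned block both fits in the hole and ends inside the range. */
#include <stdbool.h>
#include <stdio.h>

struct hole { unsigned long start, size; };

static bool hole_fits(const struct hole *h, unsigned long size,
		      unsigned long alignment, unsigned long end)
{
	unsigned long wasted = 0, tmp;

	if (alignment) {
		tmp = h->start % alignment;
		if (tmp)
			wasted = alignment - tmp;	/* bytes burned to align */
	}

	return h->size >= size + wasted &&
	       h->start + wasted + size <= end;	/* the new upper-bound check */
}

int main(void)
{
	struct hole h = { .start = 4096, .size = 8192 };

	printf("%d\n", hole_fits(&h, 4096, 4096, 16384)); /* 1: ends at 8192 */
	printf("%d\n", hole_fits(&h, 4096, 4096, 6144));  /* 0: would end past 6144 */
	return 0;
}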
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6d81a02463a3..76d63394c776 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1,9 +1,4 @@
1/* 1/*
2 * The list_sort function is (presumably) licensed under the GPL (see the
3 * top level "COPYING" file for details).
4 *
5 * The remainder of this file is:
6 *
7 * Copyright © 1997-2003 by The XFree86 Project, Inc. 2 * Copyright © 1997-2003 by The XFree86 Project, Inc.
8 * Copyright © 2007 Dave Airlie 3 * Copyright © 2007 Dave Airlie
9 * Copyright © 2007-2008 Intel Corporation 4 * Copyright © 2007-2008 Intel Corporation
@@ -36,6 +31,7 @@
36 */ 31 */
37 32
38#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/list_sort.h>
39#include "drmP.h" 35#include "drmP.h"
40#include "drm.h" 36#include "drm.h"
41#include "drm_crtc.h" 37#include "drm_crtc.h"
@@ -855,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
855 851
856/** 852/**
857 * drm_mode_compare - compare modes for favorability 853 * drm_mode_compare - compare modes for favorability
854 * @priv: unused
858 * @lh_a: list_head for first mode 855 * @lh_a: list_head for first mode
859 * @lh_b: list_head for second mode 856 * @lh_b: list_head for second mode
860 * 857 *
@@ -868,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
868 * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or 865 * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
869 * positive if @lh_b is better than @lh_a. 866 * positive if @lh_b is better than @lh_a.
870 */ 867 */
871static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b) 868static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
872{ 869{
873 struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head); 870 struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
874 struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head); 871 struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
@@ -885,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
885 return diff; 882 return diff;
886} 883}
887 884
888/* FIXME: what we don't have a list sort function? */
889/* list sort from Mark J Roberts (mjr@znex.org) */
890void list_sort(struct list_head *head,
891 int (*cmp)(struct list_head *a, struct list_head *b))
892{
893 struct list_head *p, *q, *e, *list, *tail, *oldhead;
894 int insize, nmerges, psize, qsize, i;
895
896 list = head->next;
897 list_del(head);
898 insize = 1;
899 for (;;) {
900 p = oldhead = list;
901 list = tail = NULL;
902 nmerges = 0;
903
904 while (p) {
905 nmerges++;
906 q = p;
907 psize = 0;
908 for (i = 0; i < insize; i++) {
909 psize++;
910 q = q->next == oldhead ? NULL : q->next;
911 if (!q)
912 break;
913 }
914
915 qsize = insize;
916 while (psize > 0 || (qsize > 0 && q)) {
917 if (!psize) {
918 e = q;
919 q = q->next;
920 qsize--;
921 if (q == oldhead)
922 q = NULL;
923 } else if (!qsize || !q) {
924 e = p;
925 p = p->next;
926 psize--;
927 if (p == oldhead)
928 p = NULL;
929 } else if (cmp(p, q) <= 0) {
930 e = p;
931 p = p->next;
932 psize--;
933 if (p == oldhead)
934 p = NULL;
935 } else {
936 e = q;
937 q = q->next;
938 qsize--;
939 if (q == oldhead)
940 q = NULL;
941 }
942 if (tail)
943 tail->next = e;
944 else
945 list = e;
946 e->prev = tail;
947 tail = e;
948 }
949 p = q;
950 }
951
952 tail->next = list;
953 list->prev = tail;
954
955 if (nmerges <= 1)
956 break;
957
958 insize *= 2;
959 }
960
961 head->next = list;
962 head->prev = list->prev;
963 list->prev->next = head;
964 list->prev = head;
965}
966
967/** 885/**
968 * drm_mode_sort - sort mode list 886 * drm_mode_sort - sort mode list
969 * @mode_list: list to sort 887 * @mode_list: list to sort
@@ -975,7 +893,7 @@ void list_sort(struct list_head *head,
975 */ 893 */
976void drm_mode_sort(struct list_head *mode_list) 894void drm_mode_sort(struct list_head *mode_list)
977{ 895{
978 list_sort(mode_list, drm_mode_compare); 896 list_sort(NULL, mode_list, drm_mode_compare);
979} 897}
980EXPORT_SYMBOL(drm_mode_sort); 898EXPORT_SYMBOL(drm_mode_sort);
981 899
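Annotation: the private list_sort copy in drm_modes.c is dropped in favour of the generic lib/list_sort.c helper, whose comparator takes an extra void *priv argument (passed as NULL by drm_mode_sort()). The userspace sketch below roughly models the drm_mode_compare() ordering -- larger area first, then higher clock -- by handing the same comparison to qsort(); it is an approximation, not the kernel routine.

/* Userspace model of the mode-favorability ordering, fed to qsort(). */
#include <stdio.h>
#include <stdlib.h>

struct mode { int hdisplay, vdisplay, clock; };

static int mode_cmp(const void *pa, const void *pb)
{
	const struct mode *a = pa, *b = pb;
	int diff;

	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
	if (diff)
		return diff;
	return b->clock - a->clock;
}

int main(void)
{
	struct mode modes[] = {
		{ 1024, 768, 65000 },
		{ 1920, 1080, 148500 },
		{ 1280, 1024, 108000 },
	};
	size_t i, n = sizeof(modes) / sizeof(modes[0]);

	qsort(modes, n, sizeof(modes[0]), mode_cmp);
	for (i = 0; i < n; i++)
		printf("%dx%d @ %d kHz\n",
		       modes[i].hdisplay, modes[i].vdisplay, modes[i].clock);
	return 0;
}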
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 577094fb1995..e68ebf92fa2a 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -47,8 +47,7 @@
47/** 47/**
48 * \brief Allocate a PCI consistent memory block, for DMA. 48 * \brief Allocate a PCI consistent memory block, for DMA.
49 */ 49 */
50drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, 50drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
51 dma_addr_t maxaddr)
52{ 51{
53 drm_dma_handle_t *dmah; 52 drm_dma_handle_t *dmah;
54#if 1 53#if 1
@@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
63 if (align > size) 62 if (align > size)
64 return NULL; 63 return NULL;
65 64
66 if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
67 DRM_ERROR("Setting pci dma mask failed\n");
68 return NULL;
69 }
70
71 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); 65 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
72 if (!dmah) 66 if (!dmah)
73 return NULL; 67 return NULL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b580..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
272 mem = kmap_atomic(pages[page], KM_USER0); 272 mem = kmap_atomic(pages[page], KM_USER0);
273 for (i = 0; i < PAGE_SIZE; i += 4) 273 for (i = 0; i < PAGE_SIZE; i += 4)
274 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 274 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
275 kunmap_atomic(pages[page], KM_USER0); 275 kunmap_atomic(mem, KM_USER0);
276 } 276 }
277} 277}
278 278
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
290 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 290 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
291 obj = obj_priv->obj; 291 obj = obj_priv->obj;
292 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { 292 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
293 ret = i915_gem_object_get_pages(obj); 293 ret = i915_gem_object_get_pages(obj, 0);
294 if (ret) { 294 if (ret) {
295 DRM_ERROR("Failed to get pages: %d\n", ret); 295 DRM_ERROR("Failed to get pages: %d\n", ret);
296 spin_unlock(&dev_priv->mm.active_list_lock); 296 spin_unlock(&dev_priv->mm.active_list_lock);
@@ -386,34 +386,6 @@ out:
386 return 0; 386 return 0;
387} 387}
388 388
389static int i915_registers_info(struct seq_file *m, void *data) {
390 struct drm_info_node *node = (struct drm_info_node *) m->private;
391 struct drm_device *dev = node->minor->dev;
392 drm_i915_private_t *dev_priv = dev->dev_private;
393 uint32_t reg;
394
395#define DUMP_RANGE(start, end) \
396 for (reg=start; reg < end; reg += 4) \
397 seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
398
399 DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
400 DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
401 DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
402 DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
403 DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
404 DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
405 DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
406 DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
407 DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
408 DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
409 DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
410 DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
411 DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
412 DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
413
414 return 0;
415}
416
417static int 389static int
418i915_wedged_open(struct inode *inode, 390i915_wedged_open(struct inode *inode,
419 struct file *filp) 391 struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
519} 491}
520 492
521static struct drm_info_list i915_debugfs_list[] = { 493static struct drm_info_list i915_debugfs_list[] = {
522 {"i915_regs", i915_registers_info, 0},
523 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 494 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
524 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 495 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
525 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 496 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfeac7f57..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
123 drm_i915_private_t *dev_priv = dev->dev_private; 123 drm_i915_private_t *dev_priv = dev->dev_private;
124 /* Program Hardware Status Page */ 124 /* Program Hardware Status Page */
125 dev_priv->status_page_dmah = 125 dev_priv->status_page_dmah =
126 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); 126 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
127 127
128 if (!dev_priv->status_page_dmah) { 128 if (!dev_priv->status_page_dmah) {
129 DRM_ERROR("Can not allocate hardware status page\n"); 129 DRM_ERROR("Can not allocate hardware status page\n");
@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
134 134
135 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 135 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
136 136
137 if (IS_I965G(dev))
138 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
139 0xf0;
140
137 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 141 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
138 DRM_DEBUG_DRIVER("Enabled hardware status page\n"); 142 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
139 return 0; 143 return 0;
@@ -731,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
731 if (cmdbuf->num_cliprects) { 735 if (cmdbuf->num_cliprects) {
732 cliprects = kcalloc(cmdbuf->num_cliprects, 736 cliprects = kcalloc(cmdbuf->num_cliprects,
733 sizeof(struct drm_clip_rect), GFP_KERNEL); 737 sizeof(struct drm_clip_rect), GFP_KERNEL);
734 if (cliprects == NULL) 738 if (cliprects == NULL) {
739 ret = -ENOMEM;
735 goto fail_batch_free; 740 goto fail_batch_free;
741 }
736 742
737 ret = copy_from_user(cliprects, cmdbuf->cliprects, 743 ret = copy_from_user(cliprects, cmdbuf->cliprects,
738 cmdbuf->num_cliprects * 744 cmdbuf->num_cliprects *
@@ -813,9 +819,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
813 case I915_PARAM_HAS_PAGEFLIPPING: 819 case I915_PARAM_HAS_PAGEFLIPPING:
814 value = 1; 820 value = 1;
815 break; 821 break;
822 case I915_PARAM_HAS_EXECBUF2:
823 /* depends on GEM */
824 value = dev_priv->has_gem;
825 break;
816 default: 826 default:
817 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 827 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
818 param->param); 828 param->param);
819 return -EINVAL; 829 return -EINVAL;
820 } 830 }
821 831
@@ -1117,7 +1127,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1117{ 1127{
1118 struct drm_i915_private *dev_priv = dev->dev_private; 1128 struct drm_i915_private *dev_priv = dev->dev_private;
1119 struct drm_mm_node *compressed_fb, *compressed_llb; 1129 struct drm_mm_node *compressed_fb, *compressed_llb;
1120 unsigned long cfb_base, ll_base; 1130 unsigned long cfb_base;
1131 unsigned long ll_base = 0;
1121 1132
1122 /* Leave 1M for line length buffer & misc. */ 1133 /* Leave 1M for line length buffer & misc. */
1123 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1134 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1211,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
1200 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 1211 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
1201 0xff000000; 1212 0xff000000;
1202 1213
1203 if (IS_MOBILE(dev) || IS_I9XX(dev))
1204 dev_priv->cursor_needs_physical = true;
1205 else
1206 dev_priv->cursor_needs_physical = false;
1207
1208 if (IS_I965G(dev) || IS_G33(dev))
1209 dev_priv->cursor_needs_physical = false;
1210
1211 /* Basic memrange allocator for stolen space (aka vram) */ 1214 /* Basic memrange allocator for stolen space (aka vram) */
1212 drm_mm_init(&dev_priv->vram, 0, prealloc_size); 1215 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1213 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); 1216 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1260,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
1257 if (ret) 1260 if (ret)
1258 goto destroy_ringbuffer; 1261 goto destroy_ringbuffer;
1259 1262
1263 intel_modeset_init(dev);
1264
1260 ret = drm_irq_install(dev); 1265 ret = drm_irq_install(dev);
1261 if (ret) 1266 if (ret)
1262 goto destroy_ringbuffer; 1267 goto destroy_ringbuffer;
@@ -1271,8 +1276,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
1271 1276
1272 I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); 1277 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1273 1278
1274 intel_modeset_init(dev);
1275
1276 drm_helper_initial_config(dev); 1279 drm_helper_initial_config(dev);
1277 1280
1278 return 0; 1281 return 0;
@@ -1360,7 +1363,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1360{ 1363{
1361 struct drm_i915_private *dev_priv = dev->dev_private; 1364 struct drm_i915_private *dev_priv = dev->dev_private;
1362 resource_size_t base, size; 1365 resource_size_t base, size;
1363 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; 1366 int ret = 0, mmio_bar;
1364 uint32_t agp_size, prealloc_size, prealloc_start; 1367 uint32_t agp_size, prealloc_size, prealloc_start;
1365 1368
1366 /* i915 has 4 more counters */ 1369 /* i915 has 4 more counters */
@@ -1376,8 +1379,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1376 1379
1377 dev->dev_private = (void *)dev_priv; 1380 dev->dev_private = (void *)dev_priv;
1378 dev_priv->dev = dev; 1381 dev_priv->dev = dev;
1382 dev_priv->info = (struct intel_device_info *) flags;
1379 1383
1380 /* Add register map (needed for suspend/resume) */ 1384 /* Add register map (needed for suspend/resume) */
1385 mmio_bar = IS_I9XX(dev) ? 0 : 1;
1381 base = drm_get_resource_start(dev, mmio_bar); 1386 base = drm_get_resource_start(dev, mmio_bar);
1382 size = drm_get_resource_len(dev, mmio_bar); 1387 size = drm_get_resource_len(dev, mmio_bar);
1383 1388
@@ -1652,6 +1657,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
1652 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1657 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1653 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1658 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1654 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 1659 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1660 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
1655 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1661 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1656 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1662 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1657 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), 1663 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 24286ca168fc..79beffcf5936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35 35
36#include "drm_pciids.h"
37#include <linux/console.h> 36#include <linux/console.h>
38#include "drm_crtc_helper.h" 37#include "drm_crtc_helper.h"
39 38
@@ -46,88 +45,229 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
46unsigned int i915_powersave = 1; 45unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0400); 46module_param_named(powersave, i915_powersave, int, 0400);
48 47
48unsigned int i915_lvds_downclock = 0;
49module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
50
49static struct drm_driver driver; 51static struct drm_driver driver;
50 52
51static struct pci_device_id pciidlist[] = { 53#define INTEL_VGA_DEVICE(id, info) { \
52 i915_PCI_IDS 54 .class = PCI_CLASS_DISPLAY_VGA << 8, \
55 .class_mask = 0xffff00, \
56 .vendor = 0x8086, \
57 .device = id, \
58 .subvendor = PCI_ANY_ID, \
59 .subdevice = PCI_ANY_ID, \
60 .driver_data = (unsigned long) info }
61
62const static struct intel_device_info intel_i830_info = {
63 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
64};
65
66const static struct intel_device_info intel_845g_info = {
67 .is_i8xx = 1,
68};
69
70const static struct intel_device_info intel_i85x_info = {
71 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
72};
73
74const static struct intel_device_info intel_i865g_info = {
75 .is_i8xx = 1,
76};
77
78const static struct intel_device_info intel_i915g_info = {
79 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
80};
81const static struct intel_device_info intel_i915gm_info = {
82 .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
83 .cursor_needs_physical = 1,
84};
85const static struct intel_device_info intel_i945g_info = {
86 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
87};
88const static struct intel_device_info intel_i945gm_info = {
89 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
90 .has_hotplug = 1, .cursor_needs_physical = 1,
91};
92
93const static struct intel_device_info intel_i965g_info = {
94 .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
95};
96
97const static struct intel_device_info intel_i965gm_info = {
98 .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
99 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
100 .has_hotplug = 1,
101};
102
103const static struct intel_device_info intel_g33_info = {
104 .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
105 .has_hotplug = 1,
106};
107
108const static struct intel_device_info intel_g45_info = {
109 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
110 .has_pipe_cxsr = 1,
111 .has_hotplug = 1,
112};
113
114const static struct intel_device_info intel_gm45_info = {
115 .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
116 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
117 .has_pipe_cxsr = 1,
118 .has_hotplug = 1,
119};
120
121const static struct intel_device_info intel_pineview_info = {
122 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
123 .need_gfx_hws = 1,
124 .has_hotplug = 1,
125};
126
127const static struct intel_device_info intel_ironlake_d_info = {
128 .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
129 .has_pipe_cxsr = 1,
130 .has_hotplug = 1,
131};
132
133const static struct intel_device_info intel_ironlake_m_info = {
134 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
135 .need_gfx_hws = 1, .has_rc6 = 1,
136 .has_hotplug = 1,
137};
138
139const static struct pci_device_id pciidlist[] = {
140 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
141 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
142 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
143 INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
144 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
145 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
146 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
147 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
148 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
149 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
150 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
151 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
152 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
153 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
154 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
155 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
156 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
157 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
158 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
159 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
160 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
161 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
162 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
163 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
164 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
165 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
166 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
167 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
168 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
169 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
170 {0, 0, 0}
53}; 171};
54 172
55#if defined(CONFIG_DRM_I915_KMS) 173#if defined(CONFIG_DRM_I915_KMS)
56MODULE_DEVICE_TABLE(pci, pciidlist); 174MODULE_DEVICE_TABLE(pci, pciidlist);
57#endif 175#endif
58 176
59static int i915_suspend(struct drm_device *dev, pm_message_t state) 177static int i915_drm_freeze(struct drm_device *dev)
60{ 178{
61 struct drm_i915_private *dev_priv = dev->dev_private;
62
63 if (!dev || !dev_priv) {
64 DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
65 DRM_ERROR("DRM not initialized, aborting suspend.\n");
66 return -ENODEV;
67 }
68
69 if (state.event == PM_EVENT_PRETHAW)
70 return 0;
71
72 pci_save_state(dev->pdev); 179 pci_save_state(dev->pdev);
73 180
74 /* If KMS is active, we do the leavevt stuff here */ 181 /* If KMS is active, we do the leavevt stuff here */
75 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 182 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
76 if (i915_gem_idle(dev)) 183 int error = i915_gem_idle(dev);
184 if (error) {
77 dev_err(&dev->pdev->dev, 185 dev_err(&dev->pdev->dev,
78 "GEM idle failed, resume may fail\n"); 186 "GEM idle failed, resume might fail\n");
187 return error;
188 }
79 drm_irq_uninstall(dev); 189 drm_irq_uninstall(dev);
80 } 190 }
81 191
82 i915_save_state(dev); 192 i915_save_state(dev);
83 193
194 return 0;
195}
196
197static void i915_drm_suspend(struct drm_device *dev)
198{
199 struct drm_i915_private *dev_priv = dev->dev_private;
200
84 intel_opregion_free(dev, 1); 201 intel_opregion_free(dev, 1);
85 202
203 /* Modeset on resume, not lid events */
204 dev_priv->modeset_on_lid = 0;
205}
206
207static int i915_suspend(struct drm_device *dev, pm_message_t state)
208{
209 int error;
210
211 if (!dev || !dev->dev_private) {
212 DRM_ERROR("dev: %p\n", dev);
213 DRM_ERROR("DRM not initialized, aborting suspend.\n");
214 return -ENODEV;
215 }
216
217 if (state.event == PM_EVENT_PRETHAW)
218 return 0;
219
220 error = i915_drm_freeze(dev);
221 if (error)
222 return error;
223
224 i915_drm_suspend(dev);
225
86 if (state.event == PM_EVENT_SUSPEND) { 226 if (state.event == PM_EVENT_SUSPEND) {
87 /* Shut down the device */ 227 /* Shut down the device */
88 pci_disable_device(dev->pdev); 228 pci_disable_device(dev->pdev);
89 pci_set_power_state(dev->pdev, PCI_D3hot); 229 pci_set_power_state(dev->pdev, PCI_D3hot);
90 } 230 }
91 231
92 /* Modeset on resume, not lid events */
93 dev_priv->modeset_on_lid = 0;
94
95 return 0; 232 return 0;
96} 233}
97 234
98static int i915_resume(struct drm_device *dev) 235static int i915_drm_thaw(struct drm_device *dev)
99{ 236{
100 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
101 int ret = 0; 238 int error = 0;
102
103 if (pci_enable_device(dev->pdev))
104 return -1;
105 pci_set_master(dev->pdev);
106
107 i915_restore_state(dev);
108
109 intel_opregion_init(dev, 1);
110 239
111 /* KMS EnterVT equivalent */ 240 /* KMS EnterVT equivalent */
112 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 241 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
113 mutex_lock(&dev->struct_mutex); 242 mutex_lock(&dev->struct_mutex);
114 dev_priv->mm.suspended = 0; 243 dev_priv->mm.suspended = 0;
115 244
116 ret = i915_gem_init_ringbuffer(dev); 245 error = i915_gem_init_ringbuffer(dev);
117 if (ret != 0)
118 ret = -1;
119 mutex_unlock(&dev->struct_mutex); 246 mutex_unlock(&dev->struct_mutex);
120 247
121 drm_irq_install(dev); 248 drm_irq_install(dev);
122 } 249
123 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
124 /* Resume the modeset for every activated CRTC */ 250 /* Resume the modeset for every activated CRTC */
125 drm_helper_resume_force_mode(dev); 251 drm_helper_resume_force_mode(dev);
126 } 252 }
127 253
128 dev_priv->modeset_on_lid = 0; 254 dev_priv->modeset_on_lid = 0;
129 255
130 return ret; 256 return error;
257}
258
259static int i915_resume(struct drm_device *dev)
260{
261 if (pci_enable_device(dev->pdev))
262 return -EIO;
263
264 pci_set_master(dev->pdev);
265
266 i915_restore_state(dev);
267
268 intel_opregion_init(dev, 1);
269
270 return i915_drm_thaw(dev);
131} 271}
132 272
133/** 273/**
@@ -268,22 +408,80 @@ i915_pci_remove(struct pci_dev *pdev)
268 drm_put_dev(dev); 408 drm_put_dev(dev);
269} 409}
270 410
271static int 411static int i915_pm_suspend(struct device *dev)
272i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
273{ 412{
274 struct drm_device *dev = pci_get_drvdata(pdev); 413 struct pci_dev *pdev = to_pci_dev(dev);
414 struct drm_device *drm_dev = pci_get_drvdata(pdev);
415 int error;
275 416
276 return i915_suspend(dev, state); 417 if (!drm_dev || !drm_dev->dev_private) {
418 dev_err(dev, "DRM not initialized, aborting suspend.\n");
419 return -ENODEV;
420 }
421
422 error = i915_drm_freeze(drm_dev);
423 if (error)
424 return error;
425
426 i915_drm_suspend(drm_dev);
427
428 pci_disable_device(pdev);
429 pci_set_power_state(pdev, PCI_D3hot);
430
431 return 0;
277} 432}
278 433
279static int 434static int i915_pm_resume(struct device *dev)
280i915_pci_resume(struct pci_dev *pdev)
281{ 435{
282 struct drm_device *dev = pci_get_drvdata(pdev); 436 struct pci_dev *pdev = to_pci_dev(dev);
437 struct drm_device *drm_dev = pci_get_drvdata(pdev);
438
439 return i915_resume(drm_dev);
440}
441
442static int i915_pm_freeze(struct device *dev)
443{
444 struct pci_dev *pdev = to_pci_dev(dev);
445 struct drm_device *drm_dev = pci_get_drvdata(pdev);
446
447 if (!drm_dev || !drm_dev->dev_private) {
448 dev_err(dev, "DRM not initialized, aborting suspend.\n");
449 return -ENODEV;
450 }
283 451
284 return i915_resume(dev); 452 return i915_drm_freeze(drm_dev);
285} 453}
286 454
455static int i915_pm_thaw(struct device *dev)
456{
457 struct pci_dev *pdev = to_pci_dev(dev);
458 struct drm_device *drm_dev = pci_get_drvdata(pdev);
459
460 return i915_drm_thaw(drm_dev);
461}
462
463static int i915_pm_poweroff(struct device *dev)
464{
465 struct pci_dev *pdev = to_pci_dev(dev);
466 struct drm_device *drm_dev = pci_get_drvdata(pdev);
467 int error;
468
469 error = i915_drm_freeze(drm_dev);
470 if (!error)
471 i915_drm_suspend(drm_dev);
472
473 return error;
474}
475
476const struct dev_pm_ops i915_pm_ops = {
477 .suspend = i915_pm_suspend,
478 .resume = i915_pm_resume,
479 .freeze = i915_pm_freeze,
480 .thaw = i915_pm_thaw,
481 .poweroff = i915_pm_poweroff,
482 .restore = i915_pm_resume,
483};
484
287static struct vm_operations_struct i915_gem_vm_ops = { 485static struct vm_operations_struct i915_gem_vm_ops = {
288 .fault = i915_gem_fault, 486 .fault = i915_gem_fault,
289 .open = drm_gem_vm_open, 487 .open = drm_gem_vm_open,
@@ -303,8 +501,11 @@ static struct drm_driver driver = {
303 .lastclose = i915_driver_lastclose, 501 .lastclose = i915_driver_lastclose,
304 .preclose = i915_driver_preclose, 502 .preclose = i915_driver_preclose,
305 .postclose = i915_driver_postclose, 503 .postclose = i915_driver_postclose,
504
505 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
306 .suspend = i915_suspend, 506 .suspend = i915_suspend,
307 .resume = i915_resume, 507 .resume = i915_resume,
508
308 .device_is_agp = i915_driver_device_is_agp, 509 .device_is_agp = i915_driver_device_is_agp,
309 .enable_vblank = i915_enable_vblank, 510 .enable_vblank = i915_enable_vblank,
310 .disable_vblank = i915_disable_vblank, 511 .disable_vblank = i915_disable_vblank,
@@ -344,10 +545,7 @@ static struct drm_driver driver = {
344 .id_table = pciidlist, 545 .id_table = pciidlist,
345 .probe = i915_pci_probe, 546 .probe = i915_pci_probe,
346 .remove = i915_pci_remove, 547 .remove = i915_pci_remove,
347#ifdef CONFIG_PM 548 .driver.pm = &i915_pm_ops,
348 .resume = i915_pci_resume,
349 .suspend = i915_pci_suspend,
350#endif
351 }, 549 },
352 550
353 .name = DRIVER_NAME, 551 .name = DRIVER_NAME,
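
The hunk above retires the legacy CONFIG_PM pci_driver .suspend/.resume hooks in favor of a dev_pm_ops table reached through .driver.pm, so hibernation gets its own freeze/thaw/poweroff steps instead of reusing the full suspend path. Below is a minimal sketch of the same wiring for a hypothetical PCI driver; the foo_* callbacks are illustrative stand-ins, not i915 code.

#include <linux/pci.h>
#include <linux/pm.h>

/* Sketch only: trivial callbacks standing in for real quiesce/restore work. */
static int foo_freeze(struct device *dev)  { return 0; } /* quiesce, keep power */
static int foo_thaw(struct device *dev)    { return 0; } /* undo freeze */
static int foo_suspend(struct device *dev) { return 0; } /* freeze + power down */
static int foo_resume(struct device *dev)  { return 0; } /* power up + thaw */

static const struct dev_pm_ops foo_pm_ops = {
        .suspend  = foo_suspend,  /* suspend-to-RAM */
        .resume   = foo_resume,
        .freeze   = foo_freeze,   /* hibernation: before the image is written */
        .thaw     = foo_thaw,
        .poweroff = foo_suspend,  /* hibernation: after the image is written */
        .restore  = foo_resume,
};

static struct pci_driver foo_pci_driver = {
        .name      = "foo",
        /* .id_table, .probe and .remove omitted from the sketch */
        .driver.pm = &foo_pm_ops, /* replaces the old #ifdef CONFIG_PM hook pair */
};

Splitting the callbacks this way lets the PM core run the freeze/thaw pair around writing the hibernation image without touching PCI power state, which is what the new i915_drm_freeze()/i915_drm_thaw() split provides in the i915 case.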
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac72f5bb..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
172 172
173struct intel_overlay; 173struct intel_overlay;
174 174
175struct intel_device_info {
176 u8 is_mobile : 1;
177 u8 is_i8xx : 1;
178 u8 is_i915g : 1;
179 u8 is_i9xx : 1;
180 u8 is_i945gm : 1;
181 u8 is_i965g : 1;
182 u8 is_i965gm : 1;
183 u8 is_g33 : 1;
184 u8 need_gfx_hws : 1;
185 u8 is_g4x : 1;
186 u8 is_pineview : 1;
187 u8 is_ironlake : 1;
188 u8 has_fbc : 1;
189 u8 has_rc6 : 1;
190 u8 has_pipe_cxsr : 1;
191 u8 has_hotplug : 1;
192 u8 cursor_needs_physical : 1;
193};
194
175typedef struct drm_i915_private { 195typedef struct drm_i915_private {
176 struct drm_device *dev; 196 struct drm_device *dev;
177 197
198 const struct intel_device_info *info;
199
178 int has_gem; 200 int has_gem;
179 201
180 void __iomem *regs; 202 void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
232 int hangcheck_count; 254 int hangcheck_count;
233 uint32_t last_acthd; 255 uint32_t last_acthd;
234 256
235 bool cursor_needs_physical;
236
237 struct drm_mm vram; 257 struct drm_mm vram;
238 258
239 unsigned long cfb_size; 259 unsigned long cfb_size;
@@ -263,6 +283,7 @@ typedef struct drm_i915_private {
263 unsigned int lvds_use_ssc:1; 283 unsigned int lvds_use_ssc:1;
264 unsigned int edp_support:1; 284 unsigned int edp_support:1;
265 int lvds_ssc_freq; 285 int lvds_ssc_freq;
286 int edp_bpp;
266 287
267 struct notifier_block lid_notifier; 288 struct notifier_block lid_notifier;
268 289
@@ -287,8 +308,6 @@ typedef struct drm_i915_private {
287 u32 saveDSPACNTR; 308 u32 saveDSPACNTR;
288 u32 saveDSPBCNTR; 309 u32 saveDSPBCNTR;
289 u32 saveDSPARB; 310 u32 saveDSPARB;
290 u32 saveRENDERSTANDBY;
291 u32 savePWRCTXA;
292 u32 saveHWS; 311 u32 saveHWS;
293 u32 savePIPEACONF; 312 u32 savePIPEACONF;
294 u32 savePIPEBCONF; 313 u32 savePIPEBCONF;
@@ -474,6 +493,15 @@ typedef struct drm_i915_private {
474 struct list_head flushing_list; 493 struct list_head flushing_list;
475 494
476 /** 495 /**
496 * List of objects currently pending a GPU write flush.
497 *
498 * All elements on this list will belong to either the
499 * active_list or flushing_list, last_rendering_seqno can
500 * be used to differentiate between the two elements.
501 */
502 struct list_head gpu_write_list;
503
504 /**
477 * LRU list of objects which are not in the ringbuffer and 505 * LRU list of objects which are not in the ringbuffer and
478 * are ready to unbind, but are still in the GTT. 506 * are ready to unbind, but are still in the GTT.
479 * 507 *
@@ -561,6 +589,7 @@ typedef struct drm_i915_private {
561 u16 orig_clock; 589 u16 orig_clock;
562 int child_dev_num; 590 int child_dev_num;
563 struct child_device_config *child_dev; 591 struct child_device_config *child_dev;
592 struct drm_connector *int_lvds_connector;
564} drm_i915_private_t; 593} drm_i915_private_t;
565 594
566/** driver private structure attached to each drm_gem_object */ 595/** driver private structure attached to each drm_gem_object */
@@ -572,6 +601,8 @@ struct drm_i915_gem_object {
572 601
573 /** This object's place on the active/flushing/inactive lists */ 602 /** This object's place on the active/flushing/inactive lists */
574 struct list_head list; 603 struct list_head list;
604 /** This object's place on GPU write list */
605 struct list_head gpu_write_list;
575 606
576 /** This object's place on the fenced object LRU */ 607 /** This object's place on the fenced object LRU */
577 struct list_head fence_list; 608 struct list_head fence_list;
@@ -703,6 +734,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
703extern int i915_max_ioctl; 734extern int i915_max_ioctl;
704extern unsigned int i915_fbpercrtc; 735extern unsigned int i915_fbpercrtc;
705extern unsigned int i915_powersave; 736extern unsigned int i915_powersave;
737extern unsigned int i915_lvds_downclock;
706 738
707extern void i915_save_display(struct drm_device *dev); 739extern void i915_save_display(struct drm_device *dev);
708extern void i915_restore_display(struct drm_device *dev); 740extern void i915_restore_display(struct drm_device *dev);
@@ -794,6 +826,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
794 struct drm_file *file_priv); 826 struct drm_file *file_priv);
795int i915_gem_execbuffer(struct drm_device *dev, void *data, 827int i915_gem_execbuffer(struct drm_device *dev, void *data,
796 struct drm_file *file_priv); 828 struct drm_file *file_priv);
829int i915_gem_execbuffer2(struct drm_device *dev, void *data,
830 struct drm_file *file_priv);
797int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 831int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
798 struct drm_file *file_priv); 832 struct drm_file *file_priv);
799int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 833int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -843,12 +877,13 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib
843int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 877int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
844int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 878int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
845 int write); 879 int write);
880int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
846int i915_gem_attach_phys_object(struct drm_device *dev, 881int i915_gem_attach_phys_object(struct drm_device *dev,
847 struct drm_gem_object *obj, int id); 882 struct drm_gem_object *obj, int id);
848void i915_gem_detach_phys_object(struct drm_device *dev, 883void i915_gem_detach_phys_object(struct drm_device *dev,
849 struct drm_gem_object *obj); 884 struct drm_gem_object *obj);
850void i915_gem_free_all_phys_object(struct drm_device *dev); 885void i915_gem_free_all_phys_object(struct drm_device *dev);
851int i915_gem_object_get_pages(struct drm_gem_object *obj); 886int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
852void i915_gem_object_put_pages(struct drm_gem_object *obj); 887void i915_gem_object_put_pages(struct drm_gem_object *obj);
853void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); 888void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
854void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); 889void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
@@ -860,6 +895,9 @@ void i915_gem_shrinker_exit(void);
860void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 895void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
861void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); 896void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
862void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); 897void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
898bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
899 int tiling_mode);
900bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
863 901
864/* i915_gem_debug.c */ 902/* i915_gem_debug.c */
865void i915_gem_dump_object(struct drm_gem_object *obj, int len, 903void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1020,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
982extern int i915_wrap_ring(struct drm_device * dev); 1020extern int i915_wrap_ring(struct drm_device * dev);
983extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 1021extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
984 1022
985#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1023#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
986#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1024
987#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1025#define IS_I830(dev) ((dev)->pci_device == 0x3577)
988#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1026#define IS_845G(dev) ((dev)->pci_device == 0x2562)
989#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) 1027#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
990 1028#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
991#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) 1029#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
992#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1030#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
993#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1031#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
994#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ 1032#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
995 (dev)->pci_device == 0x27AE) 1033#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
996#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ 1034#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
997 (dev)->pci_device == 0x2982 || \ 1035#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
998 (dev)->pci_device == 0x2992 || \ 1036#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
999 (dev)->pci_device == 0x29A2 || \ 1037#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1000 (dev)->pci_device == 0x2A02 || \ 1038#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1001 (dev)->pci_device == 0x2A12 || \ 1039#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1002 (dev)->pci_device == 0x2A42 || \ 1040#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1003 (dev)->pci_device == 0x2E02 || \ 1041#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1004 (dev)->pci_device == 0x2E12 || \
1005 (dev)->pci_device == 0x2E22 || \
1006 (dev)->pci_device == 0x2E32 || \
1007 (dev)->pci_device == 0x2E42 || \
1008 (dev)->pci_device == 0x0042 || \
1009 (dev)->pci_device == 0x0046)
1010
1011#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
1012 (dev)->pci_device == 0x2A12)
1013
1014#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1015
1016#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
1017 (dev)->pci_device == 0x2E12 || \
1018 (dev)->pci_device == 0x2E22 || \
1019 (dev)->pci_device == 0x2E32 || \
1020 (dev)->pci_device == 0x2E42 || \
1021 IS_GM45(dev))
1022
1023#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1024#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1025#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
1026
1027#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
1028 (dev)->pci_device == 0x29B2 || \
1029 (dev)->pci_device == 0x29D2 || \
1030 (IS_PINEVIEW(dev)))
1031
1032#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1042#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1033#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1043#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1034#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev)) 1044#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
1035 1045#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1036#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 1046#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1037 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
1038 IS_IRONLAKE(dev))
1039 1047
1040#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 1048#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1041 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
1042 IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
1043 1049
1044#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
1045 IS_IRONLAKE(dev))
1046/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1050/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1047 * rows, which changed the alignment requirements and fence programming. 1051 * rows, which changed the alignment requirements and fence programming.
1048 */ 1052 */
@@ -1054,17 +1058,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1054#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1058#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1055#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ 1059#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
1056 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) 1060 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
1057#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) 1061#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1058/* dsparb controlled by hw only */ 1062/* dsparb controlled by hw only */
1059#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1063#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1060 1064
1061#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) 1065#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
1062#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1066#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1063#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ 1067#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1064 (IS_I9XX(dev) || IS_GM45(dev)) && \ 1068#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1065 !IS_PINEVIEW(dev) && \
1066 !IS_IRONLAKE(dev))
1067#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
1068 1069
1069#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1070#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1070 1071
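
The header changes above swap long PCI-ID comparison chains for a per-device intel_device_info consulted through INTEL_INFO(dev). The tables themselves are outside this hunk (they live in i915_drv.c), so the snippet below is only an assumed illustration of what one entry and one feature check might look like, taking the declarations from i915_drv.h as in scope.

/* Hypothetical example entry; the real tables and their flag values are
 * not part of this diff. */
static const struct intel_device_info example_i945gm_info = {
        .is_i9xx = 1, .is_i945gm = 1, .is_mobile = 1,
        .has_hotplug = 1,
};

/* Once the matched entry is stored in dev_priv->info at probe time, a
 * feature test becomes a single flag lookup instead of a PCI-ID switch. */
static bool example_supports_hotplug(struct drm_device *dev)
{
        return INTEL_INFO(dev)->has_hotplug;
}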
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050a..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
277 277
278 mutex_lock(&dev->struct_mutex); 278 mutex_lock(&dev->struct_mutex);
279 279
280 ret = i915_gem_object_get_pages(obj); 280 ret = i915_gem_object_get_pages(obj, 0);
281 if (ret != 0) 281 if (ret != 0)
282 goto fail_unlock; 282 goto fail_unlock;
283 283
@@ -321,40 +321,24 @@ fail_unlock:
321 return ret; 321 return ret;
322} 322}
323 323
324static inline gfp_t
325i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
326{
327 return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
328}
329
330static inline void
331i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
332{
333 mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
334}
335
336static int 324static int
337i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) 325i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
338{ 326{
339 int ret; 327 int ret;
340 328
341 ret = i915_gem_object_get_pages(obj); 329 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
342 330
343 /* If we've insufficient memory to map in the pages, attempt 331 /* If we've insufficient memory to map in the pages, attempt
344 * to make some space by throwing out some old buffers. 332 * to make some space by throwing out some old buffers.
345 */ 333 */
346 if (ret == -ENOMEM) { 334 if (ret == -ENOMEM) {
347 struct drm_device *dev = obj->dev; 335 struct drm_device *dev = obj->dev;
348 gfp_t gfp;
349 336
350 ret = i915_gem_evict_something(dev, obj->size); 337 ret = i915_gem_evict_something(dev, obj->size);
351 if (ret) 338 if (ret)
352 return ret; 339 return ret;
353 340
354 gfp = i915_gem_object_get_page_gfp_mask(obj); 341 ret = i915_gem_object_get_pages(obj, 0);
355 i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
356 ret = i915_gem_object_get_pages(obj);
357 i915_gem_object_set_page_gfp_mask (obj, gfp);
358 } 342 }
359 343
360 return ret; 344 return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
790 774
791 mutex_lock(&dev->struct_mutex); 775 mutex_lock(&dev->struct_mutex);
792 776
793 ret = i915_gem_object_get_pages(obj); 777 ret = i915_gem_object_get_pages(obj, 0);
794 if (ret != 0) 778 if (ret != 0)
795 goto fail_unlock; 779 goto fail_unlock;
796 780
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1568 else 1552 else
1569 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1553 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1570 1554
1555 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1556
1571 obj_priv->last_rendering_seqno = 0; 1557 obj_priv->last_rendering_seqno = 0;
1572 if (obj_priv->active) { 1558 if (obj_priv->active) {
1573 obj_priv->active = 0; 1559 obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1638 struct drm_i915_gem_object *obj_priv, *next; 1624 struct drm_i915_gem_object *obj_priv, *next;
1639 1625
1640 list_for_each_entry_safe(obj_priv, next, 1626 list_for_each_entry_safe(obj_priv, next,
1641 &dev_priv->mm.flushing_list, list) { 1627 &dev_priv->mm.gpu_write_list,
1628 gpu_write_list) {
1642 struct drm_gem_object *obj = obj_priv->obj; 1629 struct drm_gem_object *obj = obj_priv->obj;
1643 1630
1644 if ((obj->write_domain & flush_domains) == 1631 if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1646 uint32_t old_write_domain = obj->write_domain; 1633 uint32_t old_write_domain = obj->write_domain;
1647 1634
1648 obj->write_domain = 0; 1635 obj->write_domain = 0;
1636 list_del_init(&obj_priv->gpu_write_list);
1649 i915_gem_object_move_to_active(obj, seqno); 1637 i915_gem_object_move_to_active(obj, seqno);
1650 1638
1651 trace_i915_gem_object_change_domain(obj, 1639 trace_i915_gem_object_change_domain(obj,
@@ -2021,9 +2009,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2021 /* blow away mappings if mapped through GTT */ 2009 /* blow away mappings if mapped through GTT */
2022 i915_gem_release_mmap(obj); 2010 i915_gem_release_mmap(obj);
2023 2011
2024 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2025 i915_gem_clear_fence_reg(obj);
2026
2027 /* Move the object to the CPU domain to ensure that 2012 /* Move the object to the CPU domain to ensure that
2028 * any possible CPU writes while it's not in the GTT 2013 * any possible CPU writes while it's not in the GTT
2029 * are flushed when we go to remap it. This will 2014 * are flushed when we go to remap it. This will
@@ -2039,6 +2024,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2039 2024
2040 BUG_ON(obj_priv->active); 2025 BUG_ON(obj_priv->active);
2041 2026
2027 /* release the fence reg _after_ flushing */
2028 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2029 i915_gem_clear_fence_reg(obj);
2030
2042 if (obj_priv->agp_mem != NULL) { 2031 if (obj_priv->agp_mem != NULL) {
2043 drm_unbind_agp(obj_priv->agp_mem); 2032 drm_unbind_agp(obj_priv->agp_mem);
2044 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 2033 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2099,8 +2088,8 @@ static int
2099i915_gem_evict_everything(struct drm_device *dev) 2088i915_gem_evict_everything(struct drm_device *dev)
2100{ 2089{
2101 drm_i915_private_t *dev_priv = dev->dev_private; 2090 drm_i915_private_t *dev_priv = dev->dev_private;
2102 uint32_t seqno;
2103 int ret; 2091 int ret;
2092 uint32_t seqno;
2104 bool lists_empty; 2093 bool lists_empty;
2105 2094
2106 spin_lock(&dev_priv->mm.active_list_lock); 2095 spin_lock(&dev_priv->mm.active_list_lock);
@@ -2122,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
2122 if (ret) 2111 if (ret)
2123 return ret; 2112 return ret;
2124 2113
2114 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2115
2125 ret = i915_gem_evict_from_inactive_list(dev); 2116 ret = i915_gem_evict_from_inactive_list(dev);
2126 if (ret) 2117 if (ret)
2127 return ret; 2118 return ret;
@@ -2229,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2229} 2220}
2230 2221
2231int 2222int
2232i915_gem_object_get_pages(struct drm_gem_object *obj) 2223i915_gem_object_get_pages(struct drm_gem_object *obj,
2224 gfp_t gfpmask)
2233{ 2225{
2234 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2226 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2235 int page_count, i; 2227 int page_count, i;
@@ -2255,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2255 inode = obj->filp->f_path.dentry->d_inode; 2247 inode = obj->filp->f_path.dentry->d_inode;
2256 mapping = inode->i_mapping; 2248 mapping = inode->i_mapping;
2257 for (i = 0; i < page_count; i++) { 2249 for (i = 0; i < page_count; i++) {
2258 page = read_mapping_page(mapping, i, NULL); 2250 page = read_cache_page_gfp(mapping, i,
2251 mapping_gfp_mask (mapping) |
2252 __GFP_COLD |
2253 gfpmask);
2259 if (IS_ERR(page)) { 2254 if (IS_ERR(page)) {
2260 ret = PTR_ERR(page); 2255 ret = PTR_ERR(page);
2261 i915_gem_object_put_pages(obj); 2256 i915_gem_object_put_pages(obj);
@@ -2578,12 +2573,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2578 drm_i915_private_t *dev_priv = dev->dev_private; 2573 drm_i915_private_t *dev_priv = dev->dev_private;
2579 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2574 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2580 struct drm_mm_node *free_space; 2575 struct drm_mm_node *free_space;
2581 bool retry_alloc = false; 2576 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2582 int ret; 2577 int ret;
2583 2578
2584 if (dev_priv->mm.suspended)
2585 return -EBUSY;
2586
2587 if (obj_priv->madv != I915_MADV_WILLNEED) { 2579 if (obj_priv->madv != I915_MADV_WILLNEED) {
2588 DRM_ERROR("Attempting to bind a purgeable object\n"); 2580 DRM_ERROR("Attempting to bind a purgeable object\n");
2589 return -EINVAL; 2581 return -EINVAL;
@@ -2625,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2625 DRM_INFO("Binding object of size %zd at 0x%08x\n", 2617 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2626 obj->size, obj_priv->gtt_offset); 2618 obj->size, obj_priv->gtt_offset);
2627#endif 2619#endif
2628 if (retry_alloc) { 2620 ret = i915_gem_object_get_pages(obj, gfpmask);
2629 i915_gem_object_set_page_gfp_mask (obj,
2630 i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
2631 }
2632 ret = i915_gem_object_get_pages(obj);
2633 if (retry_alloc) {
2634 i915_gem_object_set_page_gfp_mask (obj,
2635 i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
2636 }
2637 if (ret) { 2621 if (ret) {
2638 drm_mm_put_block(obj_priv->gtt_space); 2622 drm_mm_put_block(obj_priv->gtt_space);
2639 obj_priv->gtt_space = NULL; 2623 obj_priv->gtt_space = NULL;
@@ -2643,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2643 ret = i915_gem_evict_something(dev, obj->size); 2627 ret = i915_gem_evict_something(dev, obj->size);
2644 if (ret) { 2628 if (ret) {
2645 /* now try to shrink everyone else */ 2629 /* now try to shrink everyone else */
2646 if (! retry_alloc) { 2630 if (gfpmask) {
2647 retry_alloc = true; 2631 gfpmask = 0;
2648 goto search_free; 2632 goto search_free;
2649 } 2633 }
2650 2634
2651 return ret; 2635 return ret;
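
The hunks above replace the retry_alloc flag and the gfp-mask flipping helpers with an explicit gfpmask argument: the first binding attempt uses __GFP_NORETRY | __GFP_NOWARN so an allocation failure is cheap and quiet, and the call is repeated with the default flags only after trying to evict something. A control-flow sketch follows; get_pages_stub() and evict_stub() are hypothetical stand-ins, not the real i915 helpers or their signatures.

#include <linux/errno.h>
#include <linux/gfp.h>

static int get_pages_stub(gfp_t extra_gfp) { return -ENOMEM; } /* assumed helper */
static int evict_stub(void)                { return -ENOSPC; } /* assumed helper */

static int bind_with_retry(void)
{
        gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
        int ret;

search_free:
        ret = get_pages_stub(gfpmask);
        if (ret == -ENOMEM) {
                if (evict_stub() == 0)   /* made room: try the same flags again */
                        goto search_free;
                if (gfpmask) {           /* nothing left to evict: */
                        gfpmask = 0;     /* retry once, allowing blocking reclaim */
                        goto search_free;
                }
        }
        return ret;
}

In this sketch the stubs make the loop terminate immediately; in the real path the retry bottoms out once eviction can no longer free anything and the final gfpmask == 0 attempt either succeeds or fails for good.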
@@ -2723,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2723 old_write_domain = obj->write_domain; 2707 old_write_domain = obj->write_domain;
2724 i915_gem_flush(dev, 0, obj->write_domain); 2708 i915_gem_flush(dev, 0, obj->write_domain);
2725 seqno = i915_add_request(dev, NULL, obj->write_domain); 2709 seqno = i915_add_request(dev, NULL, obj->write_domain);
2726 obj->write_domain = 0; 2710 BUG_ON(obj->write_domain);
2727 i915_gem_object_move_to_active(obj, seqno); 2711 i915_gem_object_move_to_active(obj, seqno);
2728 2712
2729 trace_i915_gem_object_change_domain(obj, 2713 trace_i915_gem_object_change_domain(obj,
@@ -2839,6 +2823,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2839 return 0; 2823 return 0;
2840} 2824}
2841 2825
2826/*
2827 * Prepare buffer for display plane. Use uninterruptible for possible flush
2828 * wait, as in modesetting process we're not supposed to be interrupted.
2829 */
2830int
2831i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2832{
2833 struct drm_device *dev = obj->dev;
2834 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2835 uint32_t old_write_domain, old_read_domains;
2836 int ret;
2837
2838 /* Not valid to be called on unbound objects. */
2839 if (obj_priv->gtt_space == NULL)
2840 return -EINVAL;
2841
2842 i915_gem_object_flush_gpu_write_domain(obj);
2843
2844 /* Wait on any GPU rendering and flushing to occur. */
2845 if (obj_priv->active) {
2846#if WATCH_BUF
2847 DRM_INFO("%s: object %p wait for seqno %08x\n",
2848 __func__, obj, obj_priv->last_rendering_seqno);
2849#endif
2850 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
2851 if (ret != 0)
2852 return ret;
2853 }
2854
2855 old_write_domain = obj->write_domain;
2856 old_read_domains = obj->read_domains;
2857
2858 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2859
2860 i915_gem_object_flush_cpu_write_domain(obj);
2861
2862 /* It should now be out of any other write domains, and we can update
2863 * the domain values for our changes.
2864 */
2865 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2866 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2867 obj->write_domain = I915_GEM_DOMAIN_GTT;
2868 obj_priv->dirty = 1;
2869
2870 trace_i915_gem_object_change_domain(obj,
2871 old_read_domains,
2872 old_write_domain);
2873
2874 return 0;
2875}
2876
2842/** 2877/**
2843 * Moves a single object to the CPU read, and possibly write domain. 2878 * Moves a single object to the CPU read, and possibly write domain.
2844 * 2879 *
@@ -3198,7 +3233,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3198static int 3233static int
3199i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 3234i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3200 struct drm_file *file_priv, 3235 struct drm_file *file_priv,
3201 struct drm_i915_gem_exec_object *entry, 3236 struct drm_i915_gem_exec_object2 *entry,
3202 struct drm_i915_gem_relocation_entry *relocs) 3237 struct drm_i915_gem_relocation_entry *relocs)
3203{ 3238{
3204 struct drm_device *dev = obj->dev; 3239 struct drm_device *dev = obj->dev;
@@ -3206,12 +3241,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3206 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3241 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3207 int i, ret; 3242 int i, ret;
3208 void __iomem *reloc_page; 3243 void __iomem *reloc_page;
3244 bool need_fence;
3245
3246 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3247 obj_priv->tiling_mode != I915_TILING_NONE;
3248
3249 /* Check fence reg constraints and rebind if necessary */
3250 if (need_fence && !i915_obj_fenceable(dev, obj))
3251 i915_gem_object_unbind(obj);
3209 3252
3210 /* Choose the GTT offset for our buffer and put it there. */ 3253 /* Choose the GTT offset for our buffer and put it there. */
3211 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 3254 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3212 if (ret) 3255 if (ret)
3213 return ret; 3256 return ret;
3214 3257
3258 /*
3259 * Pre-965 chips need a fence register set up in order to
3260 * properly handle blits to/from tiled surfaces.
3261 */
3262 if (need_fence) {
3263 ret = i915_gem_object_get_fence_reg(obj);
3264 if (ret != 0) {
3265 if (ret != -EBUSY && ret != -ERESTARTSYS)
3266 DRM_ERROR("Failure to install fence: %d\n",
3267 ret);
3268 i915_gem_object_unpin(obj);
3269 return ret;
3270 }
3271 }
3272
3215 entry->offset = obj_priv->gtt_offset; 3273 entry->offset = obj_priv->gtt_offset;
3216 3274
3217 /* Apply the relocations, using the GTT aperture to avoid cache 3275 /* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3431,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3373 */ 3431 */
3374static int 3432static int
3375i915_dispatch_gem_execbuffer(struct drm_device *dev, 3433i915_dispatch_gem_execbuffer(struct drm_device *dev,
3376 struct drm_i915_gem_execbuffer *exec, 3434 struct drm_i915_gem_execbuffer2 *exec,
3377 struct drm_clip_rect *cliprects, 3435 struct drm_clip_rect *cliprects,
3378 uint64_t exec_offset) 3436 uint64_t exec_offset)
3379{ 3437{
@@ -3463,7 +3521,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3463} 3521}
3464 3522
3465static int 3523static int
3466i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, 3524i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3467 uint32_t buffer_count, 3525 uint32_t buffer_count,
3468 struct drm_i915_gem_relocation_entry **relocs) 3526 struct drm_i915_gem_relocation_entry **relocs)
3469{ 3527{
@@ -3478,8 +3536,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3478 } 3536 }
3479 3537
3480 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); 3538 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3481 if (*relocs == NULL) 3539 if (*relocs == NULL) {
3540 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3482 return -ENOMEM; 3541 return -ENOMEM;
3542 }
3483 3543
3484 for (i = 0; i < buffer_count; i++) { 3544 for (i = 0; i < buffer_count; i++) {
3485 struct drm_i915_gem_relocation_entry __user *user_relocs; 3545 struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,13 +3563,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3503} 3563}
3504 3564
3505static int 3565static int
3506i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, 3566i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3507 uint32_t buffer_count, 3567 uint32_t buffer_count,
3508 struct drm_i915_gem_relocation_entry *relocs) 3568 struct drm_i915_gem_relocation_entry *relocs)
3509{ 3569{
3510 uint32_t reloc_count = 0, i; 3570 uint32_t reloc_count = 0, i;
3511 int ret = 0; 3571 int ret = 0;
3512 3572
3573 if (relocs == NULL)
3574 return 0;
3575
3513 for (i = 0; i < buffer_count; i++) { 3576 for (i = 0; i < buffer_count; i++) {
3514 struct drm_i915_gem_relocation_entry __user *user_relocs; 3577 struct drm_i915_gem_relocation_entry __user *user_relocs;
3515 int unwritten; 3578 int unwritten;
@@ -3536,7 +3599,7 @@ err:
3536} 3599}
3537 3600
3538static int 3601static int
3539i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, 3602i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
3540 uint64_t exec_offset) 3603 uint64_t exec_offset)
3541{ 3604{
3542 uint32_t exec_start, exec_len; 3605 uint32_t exec_start, exec_len;
@@ -3589,18 +3652,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
3589} 3652}
3590 3653
3591int 3654int
3592i915_gem_execbuffer(struct drm_device *dev, void *data, 3655i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3593 struct drm_file *file_priv) 3656 struct drm_file *file_priv,
3657 struct drm_i915_gem_execbuffer2 *args,
3658 struct drm_i915_gem_exec_object2 *exec_list)
3594{ 3659{
3595 drm_i915_private_t *dev_priv = dev->dev_private; 3660 drm_i915_private_t *dev_priv = dev->dev_private;
3596 struct drm_i915_gem_execbuffer *args = data;
3597 struct drm_i915_gem_exec_object *exec_list = NULL;
3598 struct drm_gem_object **object_list = NULL; 3661 struct drm_gem_object **object_list = NULL;
3599 struct drm_gem_object *batch_obj; 3662 struct drm_gem_object *batch_obj;
3600 struct drm_i915_gem_object *obj_priv; 3663 struct drm_i915_gem_object *obj_priv;
3601 struct drm_clip_rect *cliprects = NULL; 3664 struct drm_clip_rect *cliprects = NULL;
3602 struct drm_i915_gem_relocation_entry *relocs; 3665 struct drm_i915_gem_relocation_entry *relocs = NULL;
3603 int ret, ret2, i, pinned = 0; 3666 int ret = 0, ret2, i, pinned = 0;
3604 uint64_t exec_offset; 3667 uint64_t exec_offset;
3605 uint32_t seqno, flush_domains, reloc_index; 3668 uint32_t seqno, flush_domains, reloc_index;
3606 int pin_tries, flips; 3669 int pin_tries, flips;
@@ -3614,31 +3677,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3614 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 3677 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3615 return -EINVAL; 3678 return -EINVAL;
3616 } 3679 }
3617 /* Copy in the exec list from userland */
3618 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3619 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); 3680 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3620 if (exec_list == NULL || object_list == NULL) { 3681 if (object_list == NULL) {
3621 DRM_ERROR("Failed to allocate exec or object list " 3682 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3622 "for %d buffers\n",
3623 args->buffer_count); 3683 args->buffer_count);
3624 ret = -ENOMEM; 3684 ret = -ENOMEM;
3625 goto pre_mutex_err; 3685 goto pre_mutex_err;
3626 } 3686 }
3627 ret = copy_from_user(exec_list,
3628 (struct drm_i915_relocation_entry __user *)
3629 (uintptr_t) args->buffers_ptr,
3630 sizeof(*exec_list) * args->buffer_count);
3631 if (ret != 0) {
3632 DRM_ERROR("copy %d exec entries failed %d\n",
3633 args->buffer_count, ret);
3634 goto pre_mutex_err;
3635 }
3636 3687
3637 if (args->num_cliprects != 0) { 3688 if (args->num_cliprects != 0) {
3638 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), 3689 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3639 GFP_KERNEL); 3690 GFP_KERNEL);
3640 if (cliprects == NULL) 3691 if (cliprects == NULL) {
3692 ret = -ENOMEM;
3641 goto pre_mutex_err; 3693 goto pre_mutex_err;
3694 }
3642 3695
3643 ret = copy_from_user(cliprects, 3696 ret = copy_from_user(cliprects,
3644 (struct drm_clip_rect __user *) 3697 (struct drm_clip_rect __user *)
@@ -3680,6 +3733,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3680 if (object_list[i] == NULL) { 3733 if (object_list[i] == NULL) {
3681 DRM_ERROR("Invalid object handle %d at index %d\n", 3734 DRM_ERROR("Invalid object handle %d at index %d\n",
3682 exec_list[i].handle, i); 3735 exec_list[i].handle, i);
3736 /* prevent error path from reading uninitialized data */
3737 args->buffer_count = i + 1;
3683 ret = -EBADF; 3738 ret = -EBADF;
3684 goto err; 3739 goto err;
3685 } 3740 }
@@ -3688,6 +3743,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3688 if (obj_priv->in_execbuffer) { 3743 if (obj_priv->in_execbuffer) {
3689 DRM_ERROR("Object %p appears more than once in object list\n", 3744 DRM_ERROR("Object %p appears more than once in object list\n",
3690 object_list[i]); 3745 object_list[i]);
3746 /* prevent error path from reading uninitialized data */
3747 args->buffer_count = i + 1;
3691 ret = -EBADF; 3748 ret = -EBADF;
3692 goto err; 3749 goto err;
3693 } 3750 }
@@ -3801,16 +3858,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3801 i915_gem_flush(dev, 3858 i915_gem_flush(dev,
3802 dev->invalidate_domains, 3859 dev->invalidate_domains,
3803 dev->flush_domains); 3860 dev->flush_domains);
3804 if (dev->flush_domains) 3861 if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
3805 (void)i915_add_request(dev, file_priv, 3862 (void)i915_add_request(dev, file_priv,
3806 dev->flush_domains); 3863 dev->flush_domains);
3807 } 3864 }
3808 3865
3809 for (i = 0; i < args->buffer_count; i++) { 3866 for (i = 0; i < args->buffer_count; i++) {
3810 struct drm_gem_object *obj = object_list[i]; 3867 struct drm_gem_object *obj = object_list[i];
3868 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3811 uint32_t old_write_domain = obj->write_domain; 3869 uint32_t old_write_domain = obj->write_domain;
3812 3870
3813 obj->write_domain = obj->pending_write_domain; 3871 obj->write_domain = obj->pending_write_domain;
3872 if (obj->write_domain)
3873 list_move_tail(&obj_priv->gpu_write_list,
3874 &dev_priv->mm.gpu_write_list);
3875 else
3876 list_del_init(&obj_priv->gpu_write_list);
3877
3814 trace_i915_gem_object_change_domain(obj, 3878 trace_i915_gem_object_change_domain(obj,
3815 obj->read_domains, 3879 obj->read_domains,
3816 old_write_domain); 3880 old_write_domain);
@@ -3884,8 +3948,101 @@ err:
3884 3948
3885 mutex_unlock(&dev->struct_mutex); 3949 mutex_unlock(&dev->struct_mutex);
3886 3950
3951pre_mutex_err:
3952 /* Copy the updated relocations out regardless of current error
3953 * state. Failure to update the relocs would mean that the next
3954 * time userland calls execbuf, it would do so with presumed offset
3955 * state that didn't match the actual object state.
3956 */
3957 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3958 relocs);
3959 if (ret2 != 0) {
3960 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3961
3962 if (ret == 0)
3963 ret = ret2;
3964 }
3965
3966 drm_free_large(object_list);
3967 kfree(cliprects);
3968
3969 return ret;
3970}
3971
3972/*
3973 * Legacy execbuffer just creates an exec2 list from the original exec object
3974 * list array and passes it to the real function.
3975 */
3976int
3977i915_gem_execbuffer(struct drm_device *dev, void *data,
3978 struct drm_file *file_priv)
3979{
3980 struct drm_i915_gem_execbuffer *args = data;
3981 struct drm_i915_gem_execbuffer2 exec2;
3982 struct drm_i915_gem_exec_object *exec_list = NULL;
3983 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3984 int ret, i;
3985
3986#if WATCH_EXEC
3987 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3988 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3989#endif
3990
3991 if (args->buffer_count < 1) {
3992 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3993 return -EINVAL;
3994 }
3995
3996 /* Copy in the exec list from userland */
3997 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3998 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3999 if (exec_list == NULL || exec2_list == NULL) {
4000 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4001 args->buffer_count);
4002 drm_free_large(exec_list);
4003 drm_free_large(exec2_list);
4004 return -ENOMEM;
4005 }
4006 ret = copy_from_user(exec_list,
4007 (struct drm_i915_relocation_entry __user *)
4008 (uintptr_t) args->buffers_ptr,
4009 sizeof(*exec_list) * args->buffer_count);
4010 if (ret != 0) {
4011 DRM_ERROR("copy %d exec entries failed %d\n",
4012 args->buffer_count, ret);
4013 drm_free_large(exec_list);
4014 drm_free_large(exec2_list);
4015 return -EFAULT;
4016 }
4017
4018 for (i = 0; i < args->buffer_count; i++) {
4019 exec2_list[i].handle = exec_list[i].handle;
4020 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4021 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4022 exec2_list[i].alignment = exec_list[i].alignment;
4023 exec2_list[i].offset = exec_list[i].offset;
4024 if (!IS_I965G(dev))
4025 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4026 else
4027 exec2_list[i].flags = 0;
4028 }
4029
4030 exec2.buffers_ptr = args->buffers_ptr;
4031 exec2.buffer_count = args->buffer_count;
4032 exec2.batch_start_offset = args->batch_start_offset;
4033 exec2.batch_len = args->batch_len;
4034 exec2.DR1 = args->DR1;
4035 exec2.DR4 = args->DR4;
4036 exec2.num_cliprects = args->num_cliprects;
4037 exec2.cliprects_ptr = args->cliprects_ptr;
4038 exec2.flags = 0;
4039
4040 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3887 if (!ret) { 4041 if (!ret) {
3888 /* Copy the new buffer offsets back to the user's exec list. */ 4042 /* Copy the new buffer offsets back to the user's exec list. */
4043 for (i = 0; i < args->buffer_count; i++)
4044 exec_list[i].offset = exec2_list[i].offset;
4045 /* ... and back out to userspace */
3889 ret = copy_to_user((struct drm_i915_relocation_entry __user *) 4046 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3890 (uintptr_t) args->buffers_ptr, 4047 (uintptr_t) args->buffers_ptr,
3891 exec_list, 4048 exec_list,
@@ -3898,25 +4055,62 @@ err:
3898 } 4055 }
3899 } 4056 }
3900 4057
3901 /* Copy the updated relocations out regardless of current error 4058 drm_free_large(exec_list);
3902 * state. Failure to update the relocs would mean that the next 4059 drm_free_large(exec2_list);
3903 * time userland calls execbuf, it would do so with presumed offset 4060 return ret;
3904 * state that didn't match the actual object state. 4061}
3905 */
3906 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3907 relocs);
3908 if (ret2 != 0) {
3909 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3910 4062
3911 if (ret == 0) 4063int
3912 ret = ret2; 4064i915_gem_execbuffer2(struct drm_device *dev, void *data,
4065 struct drm_file *file_priv)
4066{
4067 struct drm_i915_gem_execbuffer2 *args = data;
4068 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4069 int ret;
4070
4071#if WATCH_EXEC
4072 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4073 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4074#endif
4075
4076 if (args->buffer_count < 1) {
4077 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4078 return -EINVAL;
3913 } 4079 }
3914 4080
3915pre_mutex_err: 4081 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3916 drm_free_large(object_list); 4082 if (exec2_list == NULL) {
3917 drm_free_large(exec_list); 4083 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3918 kfree(cliprects); 4084 args->buffer_count);
4085 return -ENOMEM;
4086 }
4087 ret = copy_from_user(exec2_list,
4088 (struct drm_i915_relocation_entry __user *)
4089 (uintptr_t) args->buffers_ptr,
4090 sizeof(*exec2_list) * args->buffer_count);
4091 if (ret != 0) {
4092 DRM_ERROR("copy %d exec entries failed %d\n",
4093 args->buffer_count, ret);
4094 drm_free_large(exec2_list);
4095 return -EFAULT;
4096 }
4097
4098 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4099 if (!ret) {
4100 /* Copy the new buffer offsets back to the user's exec list. */
4101 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4102 (uintptr_t) args->buffers_ptr,
4103 exec2_list,
4104 sizeof(*exec2_list) * args->buffer_count);
4105 if (ret) {
4106 ret = -EFAULT;
4107 DRM_ERROR("failed to copy %d exec entries "
4108 "back to user (%d)\n",
4109 args->buffer_count, ret);
4110 }
4111 }
3919 4112
4113 drm_free_large(exec2_list);
3920 return ret; 4114 return ret;
3921} 4115}
3922 4116
@@ -3933,19 +4127,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3933 if (ret) 4127 if (ret)
3934 return ret; 4128 return ret;
3935 } 4129 }
3936 /* 4130
3937 * Pre-965 chips need a fence register set up in order to
3938 * properly handle tiled surfaces.
3939 */
3940 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
3941 ret = i915_gem_object_get_fence_reg(obj);
3942 if (ret != 0) {
3943 if (ret != -EBUSY && ret != -ERESTARTSYS)
3944 DRM_ERROR("Failure to install fence: %d\n",
3945 ret);
3946 return ret;
3947 }
3948 }
3949 obj_priv->pin_count++; 4131 obj_priv->pin_count++;
3950 4132
3951 /* If the object is not active and not pending a flush, 4133 /* If the object is not active and not pending a flush,
@@ -4203,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4203 obj_priv->obj = obj; 4385 obj_priv->obj = obj;
4204 obj_priv->fence_reg = I915_FENCE_REG_NONE; 4386 obj_priv->fence_reg = I915_FENCE_REG_NONE;
4205 INIT_LIST_HEAD(&obj_priv->list); 4387 INIT_LIST_HEAD(&obj_priv->list);
4388 INIT_LIST_HEAD(&obj_priv->gpu_write_list);
4206 INIT_LIST_HEAD(&obj_priv->fence_list); 4389 INIT_LIST_HEAD(&obj_priv->fence_list);
4207 obj_priv->madv = I915_MADV_WILLNEED; 4390 obj_priv->madv = I915_MADV_WILLNEED;
4208 4391
@@ -4654,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
4654 spin_lock_init(&dev_priv->mm.active_list_lock); 4837 spin_lock_init(&dev_priv->mm.active_list_lock);
4655 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4838 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4656 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 4839 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4840 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4657 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4841 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4658 INIT_LIST_HEAD(&dev_priv->mm.request_list); 4842 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4659 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4843 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4708,7 +4892,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
4708 4892
4709 phys_obj->id = id; 4893 phys_obj->id = id;
4710 4894
4711 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); 4895 phys_obj->handle = drm_pci_alloc(dev, size, 0);
4712 if (!phys_obj->handle) { 4896 if (!phys_obj->handle) {
4713 ret = -ENOMEM; 4897 ret = -ENOMEM;
4714 goto kfree_obj; 4898 goto kfree_obj;
@@ -4766,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4766 if (!obj_priv->phys_obj) 4950 if (!obj_priv->phys_obj)
4767 return; 4951 return;
4768 4952
4769 ret = i915_gem_object_get_pages(obj); 4953 ret = i915_gem_object_get_pages(obj, 0);
4770 if (ret) 4954 if (ret)
4771 goto out; 4955 goto out;
4772 4956
@@ -4824,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4824 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 5008 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4825 obj_priv->phys_obj->cur_obj = obj; 5009 obj_priv->phys_obj->cur_obj = obj;
4826 5010
4827 ret = i915_gem_object_get_pages(obj); 5011 ret = i915_gem_object_get_pages(obj, 0);
4828 if (ret) { 5012 if (ret) {
4829 DRM_ERROR("failed to get page list\n"); 5013 DRM_ERROR("failed to get page list\n");
4830 goto out; 5014 goto out;
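
Taken together, the i915_gem.c hunks above introduce dev_priv->mm.gpu_write_list: execbuffer puts an object on it when the buffer is left with a pending GPU write domain, i915_add_request() walks it when those domains are flushed, and an object must be off the list again before it may move to the inactive list (hence the new BUG_ON). A reduced sketch of that bookkeeping follows; the struct and helpers are illustrative stand-ins, not the real drm_i915_gem_object, and each node is assumed to have been set up with INIT_LIST_HEAD() at object creation, as i915_gem_init_object() now does.

#include <linux/list.h>
#include <linux/types.h>

struct obj_sketch {
        struct list_head gpu_write_list; /* node on the device-wide list */
        u32 write_domain;
};

static LIST_HEAD(device_gpu_write_list); /* dev_priv->mm.gpu_write_list in i915 */

/* execbuffer: remember which objects the GPU is about to write */
static void sketch_track_write(struct obj_sketch *obj, u32 pending_write_domain)
{
        obj->write_domain = pending_write_domain;
        if (obj->write_domain)
                list_move_tail(&obj->gpu_write_list, &device_gpu_write_list);
        else
                list_del_init(&obj->gpu_write_list);
}

/* add_request: once a flush covers an object's write domain, drop it from
 * the list (the real code then moves the object to the active list). */
static void sketch_retire_writes(u32 flush_domains)
{
        struct obj_sketch *obj, *next;

        list_for_each_entry_safe(obj, next, &device_gpu_write_list, gpu_write_list) {
                if ((obj->write_domain & flush_domains) == obj->write_domain) {
                        obj->write_domain = 0;
                        list_del_init(&obj->gpu_write_list);
                }
        }
}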
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c09bb..df278b2685bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
304 304
305 305
306/** 306/**
307 * Returns the size of the fence for a tiled object of the given size. 307 * Returns whether an object is currently fenceable. If not, it may need
308 * to be unbound and have its pitch adjusted.
308 */ 309 */
309static int 310bool
310i915_get_fence_size(struct drm_device *dev, int size) 311i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
311{ 312{
312 int i; 313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
313 int start;
314 314
315 if (IS_I965G(dev)) { 315 if (IS_I965G(dev)) {
316 /* The 965 can have fences at any page boundary. */ 316 /* The 965 can have fences at any page boundary. */
317 return ALIGN(size, 4096); 317 if (obj->size & 4095)
318 return false;
319 return true;
320 } else if (IS_I9XX(dev)) {
321 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
322 return false;
318 } else { 323 } else {
319 /* Align the size to a power of two greater than the smallest 324 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
320 * fence size. 325 return false;
321 */ 326 }
322 if (IS_I9XX(dev))
323 start = 1024 * 1024;
324 else
325 start = 512 * 1024;
326 327
327 for (i = start; i < size; i <<= 1) 328 /* Power of two sized... */
328 ; 329 if (obj->size & (obj->size - 1))
330 return false;
329 331
330 return i; 332 /* Objects must be size aligned as well */
331 } 333 if (obj_priv->gtt_offset & (obj->size - 1))
334 return false;
335 return true;
332} 336}
333 337
334 /* Check pitch constraints for all chips & tiling formats */ 338 /* Check pitch constraints for all chips & tiling formats */
335static bool 339bool
336i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 340i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
337{ 341{
338 int tile_width; 342 int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
384 if (stride & (stride - 1)) 388 if (stride & (stride - 1))
385 return false; 389 return false;
386 390
387 /* We don't handle the aperture area covered by the fence being bigger
388 * than the object size.
389 */
390 if (i915_get_fence_size(dev, size) != size)
391 return false;
392
393 return true; 391 return true;
394} 392}
395 393
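
The replacement of i915_get_fence_size() with i915_obj_fenceable() above reduces the pre-965 fence constraints to direct checks: the object size must be a power of two, the GTT offset must be aligned to that size, and the offset must sit inside the generation's fenceable range. The two bit tests are the usual idioms; a standalone sketch for reference, with function names chosen here purely for illustration.

#include <stdbool.h>
#include <stdint.h>

static bool size_is_power_of_two(uint64_t size)
{
        return size != 0 && (size & (size - 1)) == 0;
}

/* Only meaningful once size is known to be a power of two. */
static bool offset_is_size_aligned(uint64_t offset, uint64_t size)
{
        return (offset & (size - 1)) == 0;
}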
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85f4c5de97e2..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,7 +274,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
275 int ret = IRQ_NONE; 275 int ret = IRQ_NONE;
276 u32 de_iir, gt_iir, de_ier, pch_iir; 276 u32 de_iir, gt_iir, de_ier, pch_iir;
277 u32 new_de_iir, new_gt_iir, new_pch_iir;
278 struct drm_i915_master_private *master_priv; 277 struct drm_i915_master_private *master_priv;
279 278
280 /* disable master interrupt before clearing iir */ 279 /* disable master interrupt before clearing iir */
@@ -286,49 +285,58 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
286 gt_iir = I915_READ(GTIIR); 285 gt_iir = I915_READ(GTIIR);
287 pch_iir = I915_READ(SDEIIR); 286 pch_iir = I915_READ(SDEIIR);
288 287
289 for (;;) { 288 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
290 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) 289 goto done;
291 break;
292 290
293 ret = IRQ_HANDLED; 291 ret = IRQ_HANDLED;
294 292
295 /* should clear PCH hotplug event before clear CPU irq */ 293 if (dev->primary->master) {
296 I915_WRITE(SDEIIR, pch_iir); 294 master_priv = dev->primary->master->driver_priv;
297 new_pch_iir = I915_READ(SDEIIR); 295 if (master_priv->sarea_priv)
296 master_priv->sarea_priv->last_dispatch =
297 READ_BREADCRUMB(dev_priv);
298 }
298 299
299 I915_WRITE(DEIIR, de_iir); 300 if (gt_iir & GT_USER_INTERRUPT) {
300 new_de_iir = I915_READ(DEIIR); 301 u32 seqno = i915_get_gem_seqno(dev);
301 I915_WRITE(GTIIR, gt_iir); 302 dev_priv->mm.irq_gem_seqno = seqno;
302 new_gt_iir = I915_READ(GTIIR); 303 trace_i915_gem_request_complete(dev, seqno);
304 DRM_WAKEUP(&dev_priv->irq_queue);
305 dev_priv->hangcheck_count = 0;
306 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
307 }
303 308
304 if (dev->primary->master) { 309 if (de_iir & DE_GSE)
305 master_priv = dev->primary->master->driver_priv; 310 ironlake_opregion_gse_intr(dev);
306 if (master_priv->sarea_priv)
307 master_priv->sarea_priv->last_dispatch =
308 READ_BREADCRUMB(dev_priv);
309 }
310 311
311 if (gt_iir & GT_USER_INTERRUPT) { 312 if (de_iir & DE_PLANEA_FLIP_DONE) {
312 u32 seqno = i915_get_gem_seqno(dev); 313 intel_prepare_page_flip(dev, 0);
313 dev_priv->mm.irq_gem_seqno = seqno; 314 intel_finish_page_flip(dev, 0);
314 trace_i915_gem_request_complete(dev, seqno); 315 }
315 DRM_WAKEUP(&dev_priv->irq_queue);
316 }
317 316
318 if (de_iir & DE_GSE) 317 if (de_iir & DE_PLANEB_FLIP_DONE) {
319 ironlake_opregion_gse_intr(dev); 318 intel_prepare_page_flip(dev, 1);
319 intel_finish_page_flip(dev, 1);
320 }
320 321
321 /* check event from PCH */ 322 if (de_iir & DE_PIPEA_VBLANK)
322 if ((de_iir & DE_PCH_EVENT) && 323 drm_handle_vblank(dev, 0);
323 (pch_iir & SDE_HOTPLUG_MASK)) {
324 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
325 }
326 324
327 de_iir = new_de_iir; 325 if (de_iir & DE_PIPEB_VBLANK)
328 gt_iir = new_gt_iir; 326 drm_handle_vblank(dev, 1);
329 pch_iir = new_pch_iir; 327
328 /* check event from PCH */
329 if ((de_iir & DE_PCH_EVENT) &&
330 (pch_iir & SDE_HOTPLUG_MASK)) {
331 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
330 } 332 }
331 333
334 /* should clear PCH hotplug event before clear CPU irq */
335 I915_WRITE(SDEIIR, pch_iir);
336 I915_WRITE(GTIIR, gt_iir);
337 I915_WRITE(DEIIR, de_iir);
338
339done:
332 I915_WRITE(DEIER, de_ier); 340 I915_WRITE(DEIER, de_ier);
333 (void)I915_READ(DEIER); 341 (void)I915_READ(DEIER);
334 342
@@ -852,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
852 if (!(pipeconf & PIPEACONF_ENABLE)) 860 if (!(pipeconf & PIPEACONF_ENABLE))
853 return -EINVAL; 861 return -EINVAL;
854 862
855 if (IS_IRONLAKE(dev))
856 return 0;
857
858 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
859 if (IS_I965G(dev)) 864 if (IS_IRONLAKE(dev))
865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
867 else if (IS_I965G(dev))
860 i915_enable_pipestat(dev_priv, pipe, 868 i915_enable_pipestat(dev_priv, pipe,
861 PIPE_START_VBLANK_INTERRUPT_ENABLE); 869 PIPE_START_VBLANK_INTERRUPT_ENABLE);
862 else 870 else
@@ -874,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
874 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 882 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
875 unsigned long irqflags; 883 unsigned long irqflags;
876 884
877 if (IS_IRONLAKE(dev))
878 return;
879
880 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
881 i915_disable_pipestat(dev_priv, pipe, 886 if (IS_IRONLAKE(dev))
882 PIPE_VBLANK_INTERRUPT_ENABLE | 887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
883 PIPE_START_VBLANK_INTERRUPT_ENABLE); 888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
889 else
890 i915_disable_pipestat(dev_priv, pipe,
891 PIPE_VBLANK_INTERRUPT_ENABLE |
892 PIPE_START_VBLANK_INTERRUPT_ENABLE);
884 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 893 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
885} 894}
886 895
@@ -1023,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1023{ 1032{
1024 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1033 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1025 /* enable kind of interrupts always enabled */ 1034 /* enable kind of interrupts always enabled */
1026 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; 1035 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1036 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1027 u32 render_mask = GT_USER_INTERRUPT; 1037 u32 render_mask = GT_USER_INTERRUPT;
1028 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1038 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1029 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1039 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1030 1040
1031 dev_priv->irq_mask_reg = ~display_mask; 1041 dev_priv->irq_mask_reg = ~display_mask;
1032 dev_priv->de_irq_enable_reg = display_mask; 1042 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
1033 1043
1034 /* should always can generate irq */ 1044 /* should always can generate irq */
1035 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1045 I915_WRITE(DEIIR, I915_READ(DEIIR));
@@ -1084,6 +1094,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1084 (void) I915_READ(IER); 1094 (void) I915_READ(IER);
1085} 1095}
1086 1096
1097/*
1098 * Must be called after intel_modeset_init or hotplug interrupts won't be
1099 * enabled correctly.
1100 */
1087int i915_driver_irq_postinstall(struct drm_device *dev) 1101int i915_driver_irq_postinstall(struct drm_device *dev)
1088{ 1102{
1089 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1103 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1106,19 +1120,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1106 if (I915_HAS_HOTPLUG(dev)) { 1120 if (I915_HAS_HOTPLUG(dev)) {
1107 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1121 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1108 1122
1109 /* Leave other bits alone */ 1123 /* Note HDMI and DP share bits */
1110 hotplug_en |= HOTPLUG_EN_MASK; 1124 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1125 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1126 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1127 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1128 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1129 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1130 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1131 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1132 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1133 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1134 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
1135 hotplug_en |= CRT_HOTPLUG_INT_EN;
1136 /* Ignore TV since it's buggy */
1137
1111 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1138 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1112 1139
1113 dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
1114 TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
1115 SDVOB_HOTPLUG_INT_STATUS;
1116 if (IS_G4X(dev)) {
1117 dev_priv->hotplug_supported_mask |=
1118 HDMIB_HOTPLUG_INT_STATUS |
1119 HDMIC_HOTPLUG_INT_STATUS |
1120 HDMID_HOTPLUG_INT_STATUS;
1121 }
1122 /* Enable in IER... */ 1140 /* Enable in IER... */
1123 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1141 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1124 /* and unmask in IMR */ 1142 /* and unmask in IMR */
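
Note: the postinstall path stops force-enabling the old HOTPLUG_EN_MASK and instead turns on only the detection bits that the connector init code advertised through dev_priv->hotplug_supported_mask (see the intel_crt.c hunk further down, which registers CRT_HOTPLUG_INT_STATUS). A compressed user-space sketch of that mapping, showing two of the six sources; only HDMIB's status value appears in this patch, the other constants are placeholders so the sketch builds.

#include <stdint.h>
#include <stdio.h>

#define HDMIB_HOTPLUG_INT_STATUS (1u << 29)   /* value from the i915_reg.h hunk below */
#define CRT_HOTPLUG_INT_STATUS   (1u << 11)   /* placeholder */
#define HDMIB_HOTPLUG_INT_EN     (1u << 29)   /* placeholder */
#define CRT_HOTPLUG_INT_EN       (1u << 9)    /* placeholder */

static uint32_t hotplug_en_bits(uint32_t supported_mask)
{
	uint32_t en = 0;

	/* Enable only what a probed connector registered; TV stays off. */
	if (supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		en |= HDMIB_HOTPLUG_INT_EN;
	if (supported_mask & CRT_HOTPLUG_INT_STATUS)
		en |= CRT_HOTPLUG_INT_EN;
	return en;
}

int main(void)
{
	/* Only a CRT connector was initialized (see intel_crt_init() below). */
	printf("0x%08x\n", hotplug_en_bits(CRT_HOTPLUG_INT_STATUS));
	return 0;
}
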
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 974b3cf70618..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
338#define FBC_CTL_PERIODIC (1<<30) 338#define FBC_CTL_PERIODIC (1<<30)
339#define FBC_CTL_INTERVAL_SHIFT (16) 339#define FBC_CTL_INTERVAL_SHIFT (16)
340#define FBC_CTL_UNCOMPRESSIBLE (1<<14) 340#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
341#define FBC_C3_IDLE (1<<13)
341#define FBC_CTL_STRIDE_SHIFT (5) 342#define FBC_CTL_STRIDE_SHIFT (5)
342#define FBC_CTL_FENCENO (1<<0) 343#define FBC_CTL_FENCENO (1<<0)
343#define FBC_COMMAND 0x0320c 344#define FBC_COMMAND 0x0320c
@@ -879,13 +880,6 @@
879#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 880#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
880#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 881#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
881#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f 882#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
882#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
883 HDMIC_HOTPLUG_INT_EN | \
884 HDMID_HOTPLUG_INT_EN | \
885 SDVOB_HOTPLUG_INT_EN | \
886 SDVOC_HOTPLUG_INT_EN | \
887 CRT_HOTPLUG_INT_EN)
888
889 883
890#define PORT_HOTPLUG_STAT 0x61114 884#define PORT_HOTPLUG_STAT 0x61114
891#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 885#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -982,6 +976,8 @@
982#define LVDS_PORT_EN (1 << 31) 976#define LVDS_PORT_EN (1 << 31)
983/* Selects pipe B for LVDS data. Must be set on pre-965. */ 977/* Selects pipe B for LVDS data. Must be set on pre-965. */
984#define LVDS_PIPEB_SELECT (1 << 30) 978#define LVDS_PIPEB_SELECT (1 << 30)
979/* LVDS dithering flag on 965/g4x platform */
980#define LVDS_ENABLE_DITHER (1 << 25)
985/* Enable border for unscaled (or aspect-scaled) display */ 981/* Enable border for unscaled (or aspect-scaled) display */
986#define LVDS_BORDER_ENABLE (1 << 15) 982#define LVDS_BORDER_ENABLE (1 << 15)
987/* 983/*
@@ -1751,6 +1747,8 @@
1751 1747
1752/* Display & cursor control */ 1748/* Display & cursor control */
1753 1749
1750/* dithering flag on Ironlake */
1751#define PIPE_ENABLE_DITHER (1 << 4)
1754/* Pipe A */ 1752/* Pipe A */
1755#define PIPEADSL 0x70000 1753#define PIPEADSL 0x70000
1756#define PIPEACONF 0x70008 1754#define PIPEACONF 0x70008
@@ -1818,7 +1816,7 @@
1818#define DSPFW_PLANEB_SHIFT 8 1816#define DSPFW_PLANEB_SHIFT 8
1819#define DSPFW2 0x70038 1817#define DSPFW2 0x70038
1820#define DSPFW_CURSORA_MASK 0x00003f00 1818#define DSPFW_CURSORA_MASK 0x00003f00
1821#define DSPFW_CURSORA_SHIFT 16 1819#define DSPFW_CURSORA_SHIFT 8
1822#define DSPFW3 0x7003c 1820#define DSPFW3 0x7003c
1823#define DSPFW_HPLL_SR_EN (1<<31) 1821#define DSPFW_HPLL_SR_EN (1<<31)
1824#define DSPFW_CURSOR_SR_SHIFT 24 1822#define DSPFW_CURSOR_SR_SHIFT 24
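
Note: the DSPFW_CURSORA_SHIFT change is a straightforward fix. The cursor A watermark field sits in bits 13:8 of DSPFW2, matching DSPFW_CURSORA_MASK (0x00003f00), so the previous shift of 16 would have placed the value outside the mask. A small self-checking sketch:

#include <assert.h>
#include <stdint.h>

#define DSPFW_CURSORA_MASK  0x00003f00u
#define DSPFW_CURSORA_SHIFT 8

static uint32_t set_cursor_a_wm(uint32_t dspfw2, uint32_t wm)
{
	dspfw2 &= ~DSPFW_CURSORA_MASK;
	dspfw2 |= (wm << DSPFW_CURSORA_SHIFT) & DSPFW_CURSORA_MASK;
	return dspfw2;
}

int main(void)
{
	/* With the old shift of 16 the value would have landed outside the mask. */
	assert(((0x3fu << DSPFW_CURSORA_SHIFT) & DSPFW_CURSORA_MASK) == 0x3f00u);
	assert(set_cursor_a_wm(0, 0x2a) == 0x2a00u);
	return 0;
}
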
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d5ebb00a9d49..a3b90c9561dc 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)
732 732
733 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 733 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
734 734
735 /* Render Standby */
736 if (I915_HAS_RC6(dev)) {
737 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
738 dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
739 }
740
741 /* Hardware status page */ 735 /* Hardware status page */
742 dev_priv->saveHWS = I915_READ(HWS_PGA); 736 dev_priv->saveHWS = I915_READ(HWS_PGA);
743 737
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)
793 787
794 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 788 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
795 789
796 /* Render Standby */
797 if (I915_HAS_RC6(dev)) {
798 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
799 I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
800 }
801
802 /* Hardware status page */ 790 /* Hardware status page */
803 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 791 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
804 792
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f27567747580..15fbc1b5a83e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -33,6 +33,8 @@
33#define SLAVE_ADDR1 0x70 33#define SLAVE_ADDR1 0x70
34#define SLAVE_ADDR2 0x72 34#define SLAVE_ADDR2 0x72
35 35
36static int panel_type;
37
36static void * 38static void *
37find_section(struct bdb_header *bdb, int section_id) 39find_section(struct bdb_header *bdb, int section_id)
38{ 40{
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
128 dev_priv->lvds_dither = lvds_options->pixel_dither; 130 dev_priv->lvds_dither = lvds_options->pixel_dither;
129 if (lvds_options->panel_type == 0xff) 131 if (lvds_options->panel_type == 0xff)
130 return; 132 return;
133 panel_type = lvds_options->panel_type;
131 134
132 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); 135 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
133 if (!lvds_lfp_data) 136 if (!lvds_lfp_data)
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
197 memset(temp_mode, 0, sizeof(*temp_mode)); 200 memset(temp_mode, 0, sizeof(*temp_mode));
198 } 201 }
199 kfree(temp_mode); 202 kfree(temp_mode);
200 if (temp_downclock < panel_fixed_mode->clock) { 203 if (temp_downclock < panel_fixed_mode->clock &&
204 i915_lvds_downclock) {
201 dev_priv->lvds_downclock_avail = 1; 205 dev_priv->lvds_downclock_avail = 1;
202 dev_priv->lvds_downclock = temp_downclock; 206 dev_priv->lvds_downclock = temp_downclock;
203 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", 207 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv,
405} 409}
406 410
407static void 411static void
412parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
413{
414 struct bdb_edp *edp;
415
416 edp = find_section(bdb, BDB_EDP);
417 if (!edp) {
418 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
419 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\
420 assume 18bpp panel color depth.\n");
421 dev_priv->edp_bpp = 18;
422 }
423 return;
424 }
425
426 switch ((edp->color_depth >> (panel_type * 2)) & 3) {
427 case EDP_18BPP:
428 dev_priv->edp_bpp = 18;
429 break;
430 case EDP_24BPP:
431 dev_priv->edp_bpp = 24;
432 break;
433 case EDP_30BPP:
434 dev_priv->edp_bpp = 30;
435 break;
436 }
437}
438
439static void
408parse_device_mapping(struct drm_i915_private *dev_priv, 440parse_device_mapping(struct drm_i915_private *dev_priv,
409 struct bdb_header *bdb) 441 struct bdb_header *bdb)
410{ 442{
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev)
521 parse_sdvo_device_mapping(dev_priv, bdb); 553 parse_sdvo_device_mapping(dev_priv, bdb);
522 parse_device_mapping(dev_priv, bdb); 554 parse_device_mapping(dev_priv, bdb);
523 parse_driver_features(dev_priv, bdb); 555 parse_driver_features(dev_priv, bdb);
556 parse_edp(dev_priv, bdb);
524 557
525 pci_unmap_rom(pdev, bios); 558 pci_unmap_rom(pdev, bios);
526 559
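
Note: parse_edp() reads a 2-bit color-depth code per panel type out of a single 32-bit VBT field, and falls back to 18bpp when an eDP-capable platform has no eDP block at all. A stand-alone model of the decode; the default arm here is an illustrative fallback, whereas the kernel switch simply leaves edp_bpp untouched for unknown codes.

#include <stdint.h>
#include <stdio.h>

#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2

static int edp_bpp_from_vbt(uint32_t color_depth, int panel_type)
{
	switch ((color_depth >> (panel_type * 2)) & 3) {
	case EDP_18BPP: return 18;
	case EDP_24BPP: return 24;
	case EDP_30BPP: return 30;
	default:        return 18;   /* illustrative fallback for unknown codes */
	}
}

int main(void)
{
	/* panel_type 3 selects bits 7:6; a value of 1 there means a 24bpp panel */
	printf("%d\n", edp_bpp_from_vbt(1u << 6, 3));   /* prints 24 */
	return 0;
}
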
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 425ac9d7f724..4c18514f6f80 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -98,6 +98,7 @@ struct vbios_data {
98#define BDB_SDVO_LVDS_PNP_IDS 24 98#define BDB_SDVO_LVDS_PNP_IDS 24
99#define BDB_SDVO_LVDS_POWER_SEQ 25 99#define BDB_SDVO_LVDS_POWER_SEQ 25
100#define BDB_TV_OPTIONS 26 100#define BDB_TV_OPTIONS 26
101#define BDB_EDP 27
101#define BDB_LVDS_OPTIONS 40 102#define BDB_LVDS_OPTIONS 40
102#define BDB_LVDS_LFP_DATA_PTRS 41 103#define BDB_LVDS_LFP_DATA_PTRS 41
103#define BDB_LVDS_LFP_DATA 42 104#define BDB_LVDS_LFP_DATA 42
@@ -426,6 +427,45 @@ struct bdb_driver_features {
426 u8 custom_vbt_version; 427 u8 custom_vbt_version;
427} __attribute__((packed)); 428} __attribute__((packed));
428 429
430#define EDP_18BPP 0
431#define EDP_24BPP 1
432#define EDP_30BPP 2
433#define EDP_RATE_1_62 0
434#define EDP_RATE_2_7 1
435#define EDP_LANE_1 0
436#define EDP_LANE_2 1
437#define EDP_LANE_4 3
438#define EDP_PREEMPHASIS_NONE 0
439#define EDP_PREEMPHASIS_3_5dB 1
440#define EDP_PREEMPHASIS_6dB 2
441#define EDP_PREEMPHASIS_9_5dB 3
442#define EDP_VSWING_0_4V 0
443#define EDP_VSWING_0_6V 1
444#define EDP_VSWING_0_8V 2
445#define EDP_VSWING_1_2V 3
446
447struct edp_power_seq {
448 u16 t3;
449 u16 t7;
450 u16 t9;
451 u16 t10;
452 u16 t12;
453} __attribute__ ((packed));
454
455struct edp_link_params {
456 u8 rate:4;
457 u8 lanes:4;
458 u8 preemphasis:4;
459 u8 vswing:4;
460} __attribute__ ((packed));
461
462struct bdb_edp {
463 struct edp_power_seq power_seqs[16];
464 u32 color_depth;
465 u32 sdrrs_msa_timing_delay;
466 struct edp_link_params link_params[16];
467} __attribute__ ((packed));
468
429bool intel_init_bios(struct drm_device *dev); 469bool intel_init_bios(struct drm_device *dev);
430 470
431/* 471/*
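
Note: the new bdb_edp layout keeps per-panel link parameters as packed 4-bit fields. The helpers below show one plausible decode of the rate and lane codes defined above; indexing link_params[] by panel_type would mirror how color_depth is indexed in parse_edp(), but that lookup is not shown in this patch, so treat it as an assumption.

#include <stdint.h>

struct edp_link_params {
	uint8_t rate:4;
	uint8_t lanes:4;
	uint8_t preemphasis:4;
	uint8_t vswing:4;
};

static int edp_link_rate_khz(const struct edp_link_params *p)
{
	return p->rate == 1 ? 270000 : 162000;   /* EDP_RATE_2_7 : EDP_RATE_1_62 */
}

static int edp_lane_count(const struct edp_link_params *p)
{
	switch (p->lanes) {
	case 0:  return 1;   /* EDP_LANE_1 */
	case 1:  return 2;   /* EDP_LANE_2 */
	case 3:  return 4;   /* EDP_LANE_4 */
	default: return 1;
	}
}

int main(void)
{
	struct edp_link_params p = { .rate = 1, .lanes = 3 };   /* 2.7GHz link, 4 lanes */
	return (edp_link_rate_khz(&p) == 270000 && edp_lane_count(&p) == 4) ? 0 : 1;
}
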
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9f3d3e563414..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
157 adpa = I915_READ(PCH_ADPA); 157 adpa = I915_READ(PCH_ADPA);
158 158
159 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 159 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
160 /* disable HPD first */
161 I915_WRITE(PCH_ADPA, adpa);
162 (void)I915_READ(PCH_ADPA);
160 163
161 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 164 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
162 ADPA_CRT_HOTPLUG_WARMUP_10MS | 165 ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -548,4 +551,6 @@ void intel_crt_init(struct drm_device *dev)
548 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 551 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
549 552
550 drm_sysfs_connector_add(connector); 553 drm_sysfs_connector_add(connector);
554
555 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
551} 556}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da2..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
70 intel_p2_t p2; 70 intel_p2_t p2;
71 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 71 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
72 int, int, intel_clock_t *); 72 int, int, intel_clock_t *);
73 bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
74 int, int, intel_clock_t *);
75}; 73};
76 74
77#define I8XX_DOT_MIN 25000 75#define I8XX_DOT_MIN 25000
@@ -242,38 +240,93 @@ struct intel_limit {
242#define IRONLAKE_DOT_MAX 350000 240#define IRONLAKE_DOT_MAX 350000
243#define IRONLAKE_VCO_MIN 1760000 241#define IRONLAKE_VCO_MIN 1760000
244#define IRONLAKE_VCO_MAX 3510000 242#define IRONLAKE_VCO_MAX 3510000
245#define IRONLAKE_N_MIN 1
246#define IRONLAKE_N_MAX 5
247#define IRONLAKE_M_MIN 79
248#define IRONLAKE_M_MAX 118
249#define IRONLAKE_M1_MIN 12 243#define IRONLAKE_M1_MIN 12
250#define IRONLAKE_M1_MAX 23 244#define IRONLAKE_M1_MAX 22
251#define IRONLAKE_M2_MIN 5 245#define IRONLAKE_M2_MIN 5
252#define IRONLAKE_M2_MAX 9 246#define IRONLAKE_M2_MAX 9
253#define IRONLAKE_P_SDVO_DAC_MIN 5
254#define IRONLAKE_P_SDVO_DAC_MAX 80
255#define IRONLAKE_P_LVDS_MIN 28
256#define IRONLAKE_P_LVDS_MAX 112
257#define IRONLAKE_P1_MIN 1
258#define IRONLAKE_P1_MAX 8
259#define IRONLAKE_P2_SDVO_DAC_SLOW 10
260#define IRONLAKE_P2_SDVO_DAC_FAST 5
261#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
262#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
263#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ 247#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
264 248
249/* We have parameter ranges for different type of outputs. */
250
251/* DAC & HDMI Refclk 120Mhz */
252#define IRONLAKE_DAC_N_MIN 1
253#define IRONLAKE_DAC_N_MAX 5
254#define IRONLAKE_DAC_M_MIN 79
255#define IRONLAKE_DAC_M_MAX 127
256#define IRONLAKE_DAC_P_MIN 5
257#define IRONLAKE_DAC_P_MAX 80
258#define IRONLAKE_DAC_P1_MIN 1
259#define IRONLAKE_DAC_P1_MAX 8
260#define IRONLAKE_DAC_P2_SLOW 10
261#define IRONLAKE_DAC_P2_FAST 5
262
263/* LVDS single-channel 120Mhz refclk */
264#define IRONLAKE_LVDS_S_N_MIN 1
265#define IRONLAKE_LVDS_S_N_MAX 3
266#define IRONLAKE_LVDS_S_M_MIN 79
267#define IRONLAKE_LVDS_S_M_MAX 118
268#define IRONLAKE_LVDS_S_P_MIN 28
269#define IRONLAKE_LVDS_S_P_MAX 112
270#define IRONLAKE_LVDS_S_P1_MIN 2
271#define IRONLAKE_LVDS_S_P1_MAX 8
272#define IRONLAKE_LVDS_S_P2_SLOW 14
273#define IRONLAKE_LVDS_S_P2_FAST 14
274
275/* LVDS dual-channel 120Mhz refclk */
276#define IRONLAKE_LVDS_D_N_MIN 1
277#define IRONLAKE_LVDS_D_N_MAX 3
278#define IRONLAKE_LVDS_D_M_MIN 79
279#define IRONLAKE_LVDS_D_M_MAX 127
280#define IRONLAKE_LVDS_D_P_MIN 14
281#define IRONLAKE_LVDS_D_P_MAX 56
282#define IRONLAKE_LVDS_D_P1_MIN 2
283#define IRONLAKE_LVDS_D_P1_MAX 8
284#define IRONLAKE_LVDS_D_P2_SLOW 7
285#define IRONLAKE_LVDS_D_P2_FAST 7
286
287/* LVDS single-channel 100Mhz refclk */
288#define IRONLAKE_LVDS_S_SSC_N_MIN 1
289#define IRONLAKE_LVDS_S_SSC_N_MAX 2
290#define IRONLAKE_LVDS_S_SSC_M_MIN 79
291#define IRONLAKE_LVDS_S_SSC_M_MAX 126
292#define IRONLAKE_LVDS_S_SSC_P_MIN 28
293#define IRONLAKE_LVDS_S_SSC_P_MAX 112
294#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
295#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
296#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
297#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
298
299/* LVDS dual-channel 100Mhz refclk */
300#define IRONLAKE_LVDS_D_SSC_N_MIN 1
301#define IRONLAKE_LVDS_D_SSC_N_MAX 3
302#define IRONLAKE_LVDS_D_SSC_M_MIN 79
303#define IRONLAKE_LVDS_D_SSC_M_MAX 126
304#define IRONLAKE_LVDS_D_SSC_P_MIN 14
305#define IRONLAKE_LVDS_D_SSC_P_MAX 42
306#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
307#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
308#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
309#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
310
311/* DisplayPort */
312#define IRONLAKE_DP_N_MIN 1
313#define IRONLAKE_DP_N_MAX 2
314#define IRONLAKE_DP_M_MIN 81
315#define IRONLAKE_DP_M_MAX 90
316#define IRONLAKE_DP_P_MIN 10
317#define IRONLAKE_DP_P_MAX 20
318#define IRONLAKE_DP_P2_FAST 10
319#define IRONLAKE_DP_P2_SLOW 10
320#define IRONLAKE_DP_P2_LIMIT 0
321#define IRONLAKE_DP_P1_MIN 1
322#define IRONLAKE_DP_P1_MAX 2
323
265static bool 324static bool
266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 325intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
267 int target, int refclk, intel_clock_t *best_clock); 326 int target, int refclk, intel_clock_t *best_clock);
268static bool 327static bool
269intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
270 int target, int refclk, intel_clock_t *best_clock);
271static bool
272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 328intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
273 int target, int refclk, intel_clock_t *best_clock); 329 int target, int refclk, intel_clock_t *best_clock);
274static bool
275intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
276 int target, int refclk, intel_clock_t *best_clock);
277 330
278static bool 331static bool
279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 332intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
294 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 347 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
295 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 348 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
296 .find_pll = intel_find_best_PLL, 349 .find_pll = intel_find_best_PLL,
297 .find_reduced_pll = intel_find_best_reduced_PLL,
298}; 350};
299 351
300static const intel_limit_t intel_limits_i8xx_lvds = { 352static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
309 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 361 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
310 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 362 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
311 .find_pll = intel_find_best_PLL, 363 .find_pll = intel_find_best_PLL,
312 .find_reduced_pll = intel_find_best_reduced_PLL,
313}; 364};
314 365
315static const intel_limit_t intel_limits_i9xx_sdvo = { 366static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
324 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 375 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
325 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 376 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
326 .find_pll = intel_find_best_PLL, 377 .find_pll = intel_find_best_PLL,
327 .find_reduced_pll = intel_find_best_reduced_PLL,
328}; 378};
329 379
330static const intel_limit_t intel_limits_i9xx_lvds = { 380static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
342 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 392 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
343 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 393 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
344 .find_pll = intel_find_best_PLL, 394 .find_pll = intel_find_best_PLL,
345 .find_reduced_pll = intel_find_best_reduced_PLL,
346}; 395};
347 396
348 /* below parameter and function is for G4X Chipset Family*/ 397 /* below parameter and function is for G4X Chipset Family*/
@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
360 .p2_fast = G4X_P2_SDVO_FAST 409 .p2_fast = G4X_P2_SDVO_FAST
361 }, 410 },
362 .find_pll = intel_g4x_find_best_PLL, 411 .find_pll = intel_g4x_find_best_PLL,
363 .find_reduced_pll = intel_g4x_find_best_PLL,
364}; 412};
365 413
366static const intel_limit_t intel_limits_g4x_hdmi = { 414static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
377 .p2_fast = G4X_P2_HDMI_DAC_FAST 425 .p2_fast = G4X_P2_HDMI_DAC_FAST
378 }, 426 },
379 .find_pll = intel_g4x_find_best_PLL, 427 .find_pll = intel_g4x_find_best_PLL,
380 .find_reduced_pll = intel_g4x_find_best_PLL,
381}; 428};
382 429
383static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 430static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
402 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST 449 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
403 }, 450 },
404 .find_pll = intel_g4x_find_best_PLL, 451 .find_pll = intel_g4x_find_best_PLL,
405 .find_reduced_pll = intel_g4x_find_best_PLL,
406}; 452};
407 453
408static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 454static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
427 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST 473 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
428 }, 474 },
429 .find_pll = intel_g4x_find_best_PLL, 475 .find_pll = intel_g4x_find_best_PLL,
430 .find_reduced_pll = intel_g4x_find_best_PLL,
431}; 476};
432 477
433static const intel_limit_t intel_limits_g4x_display_port = { 478static const intel_limit_t intel_limits_g4x_display_port = {
@@ -465,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
465 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 510 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
466 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 511 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
467 .find_pll = intel_find_best_PLL, 512 .find_pll = intel_find_best_PLL,
468 .find_reduced_pll = intel_find_best_reduced_PLL,
469}; 513};
470 514
471static const intel_limit_t intel_limits_pineview_lvds = { 515static const intel_limit_t intel_limits_pineview_lvds = {
@@ -481,46 +525,135 @@ static const intel_limit_t intel_limits_pineview_lvds = {
481 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 525 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
482 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 526 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
483 .find_pll = intel_find_best_PLL, 527 .find_pll = intel_find_best_PLL,
484 .find_reduced_pll = intel_find_best_reduced_PLL,
485}; 528};
486 529
487static const intel_limit_t intel_limits_ironlake_sdvo = { 530static const intel_limit_t intel_limits_ironlake_dac = {
531 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
532 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
533 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
534 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
535 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
536 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
537 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
538 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
539 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
540 .p2_slow = IRONLAKE_DAC_P2_SLOW,
541 .p2_fast = IRONLAKE_DAC_P2_FAST },
542 .find_pll = intel_g4x_find_best_PLL,
543};
544
545static const intel_limit_t intel_limits_ironlake_single_lvds = {
546 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
547 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
548 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
549 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
550 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
551 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
552 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
553 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
554 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
555 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
556 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
557 .find_pll = intel_g4x_find_best_PLL,
558};
559
560static const intel_limit_t intel_limits_ironlake_dual_lvds = {
488 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 561 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
489 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 562 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
490 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 563 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
491 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 564 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
492 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 565 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
493 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 566 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
494 .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, 567 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
495 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 568 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
496 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 569 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
497 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, 570 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
498 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, 571 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
499 .find_pll = intel_ironlake_find_best_PLL, 572 .find_pll = intel_g4x_find_best_PLL,
500}; 573};
501 574
502static const intel_limit_t intel_limits_ironlake_lvds = { 575static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
503 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 576 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
504 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 577 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
505 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 578 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
506 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 579 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
507 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 580 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
508 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 581 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
509 .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, 582 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
510 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 583 .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
511 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 584 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
512 .p2_slow = IRONLAKE_P2_LVDS_SLOW, 585 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
513 .p2_fast = IRONLAKE_P2_LVDS_FAST }, 586 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
514 .find_pll = intel_ironlake_find_best_PLL, 587 .find_pll = intel_g4x_find_best_PLL,
588};
589
590static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
591 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
592 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
593 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
594 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
595 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
596 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
597 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
598 .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
599 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
600 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
601 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
602 .find_pll = intel_g4x_find_best_PLL,
603};
604
605static const intel_limit_t intel_limits_ironlake_display_port = {
606 .dot = { .min = IRONLAKE_DOT_MIN,
607 .max = IRONLAKE_DOT_MAX },
608 .vco = { .min = IRONLAKE_VCO_MIN,
609 .max = IRONLAKE_VCO_MAX},
610 .n = { .min = IRONLAKE_DP_N_MIN,
611 .max = IRONLAKE_DP_N_MAX },
612 .m = { .min = IRONLAKE_DP_M_MIN,
613 .max = IRONLAKE_DP_M_MAX },
614 .m1 = { .min = IRONLAKE_M1_MIN,
615 .max = IRONLAKE_M1_MAX },
616 .m2 = { .min = IRONLAKE_M2_MIN,
617 .max = IRONLAKE_M2_MAX },
618 .p = { .min = IRONLAKE_DP_P_MIN,
619 .max = IRONLAKE_DP_P_MAX },
620 .p1 = { .min = IRONLAKE_DP_P1_MIN,
621 .max = IRONLAKE_DP_P1_MAX},
622 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
623 .p2_slow = IRONLAKE_DP_P2_SLOW,
624 .p2_fast = IRONLAKE_DP_P2_FAST },
625 .find_pll = intel_find_pll_ironlake_dp,
515}; 626};
516 627
517static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 628static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
518{ 629{
630 struct drm_device *dev = crtc->dev;
631 struct drm_i915_private *dev_priv = dev->dev_private;
519 const intel_limit_t *limit; 632 const intel_limit_t *limit;
520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 633 int refclk = 120;
521 limit = &intel_limits_ironlake_lvds; 634
635 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
636 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
637 refclk = 100;
638
639 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
640 LVDS_CLKB_POWER_UP) {
641 /* LVDS dual channel */
642 if (refclk == 100)
643 limit = &intel_limits_ironlake_dual_lvds_100m;
644 else
645 limit = &intel_limits_ironlake_dual_lvds;
646 } else {
647 if (refclk == 100)
648 limit = &intel_limits_ironlake_single_lvds_100m;
649 else
650 limit = &intel_limits_ironlake_single_lvds;
651 }
652 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
653 HAS_eDP)
654 limit = &intel_limits_ironlake_display_port;
522 else 655 else
523 limit = &intel_limits_ironlake_sdvo; 656 limit = &intel_limits_ironlake_dac;
524 657
525 return limit; 658 return limit;
526} 659}
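
Note: intel_ironlake_limit() now picks one of five tables instead of two. LVDS splits by channel count (the CLKB power bit in PCH_LVDS) and by reference clock (120MHz, or 100MHz when SSC is in use), DP/eDP gets its own fixed table, and everything else falls back to the DAC/HDMI table. The decision, reduced to pure logic:

#include <stdbool.h>
#include <stdio.h>

enum ironlake_limit {
	LIMIT_DAC,
	LIMIT_LVDS_SINGLE,
	LIMIT_LVDS_DUAL,
	LIMIT_LVDS_SINGLE_100M,
	LIMIT_LVDS_DUAL_100M,
	LIMIT_DISPLAY_PORT,
};

static enum ironlake_limit pick_limit(bool is_lvds, bool is_dp_or_edp,
				      bool dual_channel, bool ssc_100mhz)
{
	if (is_lvds) {
		if (dual_channel)
			return ssc_100mhz ? LIMIT_LVDS_DUAL_100M : LIMIT_LVDS_DUAL;
		return ssc_100mhz ? LIMIT_LVDS_SINGLE_100M : LIMIT_LVDS_SINGLE;
	}
	if (is_dp_or_edp)
		return LIMIT_DISPLAY_PORT;
	return LIMIT_DAC;
}

int main(void)
{
	/* dual-channel LVDS panel on a 100MHz SSC reference clock */
	printf("%d\n", pick_limit(true, false, true, true));
	return 0;
}
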
@@ -737,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
737 return (err != target); 870 return (err != target);
738} 871}
739 872
740
741static bool
742intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
743 int target, int refclk, intel_clock_t *best_clock)
744
745{
746 struct drm_device *dev = crtc->dev;
747 intel_clock_t clock;
748 int err = target;
749 bool found = false;
750
751 memcpy(&clock, best_clock, sizeof(intel_clock_t));
752
753 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
754 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
755 /* m1 is always 0 in Pineview */
756 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
757 break;
758 for (clock.n = limit->n.min; clock.n <= limit->n.max;
759 clock.n++) {
760 int this_err;
761
762 intel_clock(dev, refclk, &clock);
763
764 if (!intel_PLL_is_valid(crtc, &clock))
765 continue;
766
767 this_err = abs(clock.dot - target);
768 if (this_err < err) {
769 *best_clock = clock;
770 err = this_err;
771 found = true;
772 }
773 }
774 }
775 }
776
777 return found;
778}
779
780static bool 873static bool
781intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 874intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
782 int target, int refclk, intel_clock_t *best_clock) 875 int target, int refclk, intel_clock_t *best_clock)
@@ -791,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
791 found = false; 884 found = false;
792 885
793 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
794 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 887 int lvds_reg;
888
889 if (IS_IRONLAKE(dev))
890 lvds_reg = PCH_LVDS;
891 else
892 lvds_reg = LVDS;
893 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
795 LVDS_CLKB_POWER_UP) 894 LVDS_CLKB_POWER_UP)
796 clock.p2 = limit->p2.p2_fast; 895 clock.p2 = limit->p2.p2_fast;
797 else 896 else
@@ -839,6 +938,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
839{ 938{
840 struct drm_device *dev = crtc->dev; 939 struct drm_device *dev = crtc->dev;
841 intel_clock_t clock; 940 intel_clock_t clock;
941
942 /* return directly when it is eDP */
943 if (HAS_eDP)
944 return true;
945
842 if (target < 200000) { 946 if (target < 200000) {
843 clock.n = 1; 947 clock.n = 1;
844 clock.p1 = 2; 948 clock.p1 = 2;
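
Note: eDP link clocks are fixed, so the DP helper now returns immediately for eDP; for external DP it only has to pick between the 1.62GHz and 2.7GHz link configurations. A sketch of that two-way choice: the n/p1 values for the low-rate case come from the context above, while the high-rate dividers are placeholders not shown in this hunk.

#include <stdio.h>

struct dp_clock { int n, p1, dot; };

static struct dp_clock ironlake_dp_clock(int target_khz)
{
	if (target_khz < 200000)
		return (struct dp_clock){ .n = 1, .p1 = 2, .dot = 162000 }; /* 1.62GHz link */
	return (struct dp_clock){ .n = 2, .p1 = 1, .dot = 270000 };         /* 2.7GHz link, placeholder dividers */
}

int main(void)
{
	printf("%d\n", ironlake_dp_clock(148500).dot);   /* prints 162000 */
	return 0;
}
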
@@ -857,68 +961,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
857 return true; 961 return true;
858} 962}
859 963
860static bool
861intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
862 int target, int refclk, intel_clock_t *best_clock)
863{
864 struct drm_device *dev = crtc->dev;
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 intel_clock_t clock;
867 int err_most = 47;
868 int err_min = 10000;
869
870 /* eDP has only 2 clock choice, no n/m/p setting */
871 if (HAS_eDP)
872 return true;
873
874 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
875 return intel_find_pll_ironlake_dp(limit, crtc, target,
876 refclk, best_clock);
877
878 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
879 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
880 LVDS_CLKB_POWER_UP)
881 clock.p2 = limit->p2.p2_fast;
882 else
883 clock.p2 = limit->p2.p2_slow;
884 } else {
885 if (target < limit->p2.dot_limit)
886 clock.p2 = limit->p2.p2_slow;
887 else
888 clock.p2 = limit->p2.p2_fast;
889 }
890
891 memset(best_clock, 0, sizeof(*best_clock));
892 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
893 /* based on hardware requriment prefer smaller n to precision */
894 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
895 /* based on hardware requirment prefere larger m1,m2 */
896 for (clock.m1 = limit->m1.max;
897 clock.m1 >= limit->m1.min; clock.m1--) {
898 for (clock.m2 = limit->m2.max;
899 clock.m2 >= limit->m2.min; clock.m2--) {
900 int this_err;
901
902 intel_clock(dev, refclk, &clock);
903 if (!intel_PLL_is_valid(crtc, &clock))
904 continue;
905 this_err = abs((10000 - (target*10000/clock.dot)));
906 if (this_err < err_most) {
907 *best_clock = clock;
908 /* found on first matching */
909 goto out;
910 } else if (this_err < err_min) {
911 *best_clock = clock;
912 err_min = this_err;
913 }
914 }
915 }
916 }
917 }
918out:
919 return true;
920}
921
922/* DisplayPort has only two frequencies, 162MHz and 270MHz */ 964/* DisplayPort has only two frequencies, 162MHz and 270MHz */
923static bool 965static bool
924intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 966intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -989,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
989 1031
990 /* enable it... */ 1032 /* enable it... */
991 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1033 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1034 if (IS_I945GM(dev))
1035 fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
992 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1036 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
993 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1037 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
994 if (obj_priv->tiling_mode != I915_TILING_NONE) 1038 if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1282,7 +1326,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1282 return ret; 1326 return ret;
1283 } 1327 }
1284 1328
1285 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 1329 ret = i915_gem_object_set_to_display_plane(obj);
1286 if (ret != 0) { 1330 if (ret != 0) {
1287 i915_gem_object_unpin(obj); 1331 i915_gem_object_unpin(obj);
1288 mutex_unlock(&dev->struct_mutex); 1332 mutex_unlock(&dev->struct_mutex);
@@ -1493,6 +1537,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1493 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; 1537 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1494 u32 temp; 1538 u32 temp;
1495 int tries = 5, j, n; 1539 int tries = 5, j, n;
1540 u32 pipe_bpc;
1541
1542 temp = I915_READ(pipeconf_reg);
1543 pipe_bpc = temp & PIPE_BPC_MASK;
1496 1544
1497 /* XXX: When our outputs are all unaware of DPMS modes other than off 1545 /* XXX: When our outputs are all unaware of DPMS modes other than off
1498 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1546 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1572,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1524 1572
1525 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1573 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1526 temp = I915_READ(fdi_rx_reg); 1574 temp = I915_READ(fdi_rx_reg);
1575 /*
1576 * make the BPC in FDI Rx be consistent with that in
1577 * pipeconf reg.
1578 */
1579 temp &= ~(0x7 << 16);
1580 temp |= (pipe_bpc << 11);
1527 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1581 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
1528 FDI_SEL_PCDCLK | 1582 FDI_SEL_PCDCLK |
1529 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ 1583 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1720,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1666 1720
1667 /* enable PCH transcoder */ 1721 /* enable PCH transcoder */
1668 temp = I915_READ(transconf_reg); 1722 temp = I915_READ(transconf_reg);
1723 /*
1724 * make the BPC in transcoder be consistent with
1725 * that in pipeconf reg.
1726 */
1727 temp &= ~PIPE_BPC_MASK;
1728 temp |= pipe_bpc;
1669 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 1729 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1670 I915_READ(transconf_reg); 1730 I915_READ(transconf_reg);
1671 1731
@@ -1697,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1697 case DRM_MODE_DPMS_OFF: 1757 case DRM_MODE_DPMS_OFF:
1698 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 1758 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1699 1759
1760 drm_vblank_off(dev, pipe);
1700 /* Disable display plane */ 1761 /* Disable display plane */
1701 temp = I915_READ(dspcntr_reg); 1762 temp = I915_READ(dspcntr_reg);
1702 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 1763 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -1745,6 +1806,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1745 I915_READ(fdi_tx_reg); 1806 I915_READ(fdi_tx_reg);
1746 1807
1747 temp = I915_READ(fdi_rx_reg); 1808 temp = I915_READ(fdi_rx_reg);
1809 /* BPC in FDI rx is consistent with that in pipeconf */
1810 temp &= ~(0x07 << 16);
1811 temp |= (pipe_bpc << 11);
1748 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); 1812 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1749 I915_READ(fdi_rx_reg); 1813 I915_READ(fdi_rx_reg);
1750 1814
@@ -1789,7 +1853,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1789 } 1853 }
1790 } 1854 }
1791 } 1855 }
1792 1856 temp = I915_READ(transconf_reg);
1857 /* BPC in transcoder is consistent with that in pipeconf */
1858 temp &= ~PIPE_BPC_MASK;
1859 temp |= pipe_bpc;
1860 I915_WRITE(transconf_reg, temp);
1861 I915_READ(transconf_reg);
1793 udelay(100); 1862 udelay(100);
1794 1863
1795 /* disable PCH DPLL */ 1864 /* disable PCH DPLL */
@@ -2448,7 +2517,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2448 * A value of 5us seems to be a good balance; safe for very low end 2517 * A value of 5us seems to be a good balance; safe for very low end
2449 * platforms but not overly aggressive on lower latency configs. 2518 * platforms but not overly aggressive on lower latency configs.
2450 */ 2519 */
2451const static int latency_ns = 5000; 2520static const int latency_ns = 5000;
2452 2521
2453static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 2522static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2454{ 2523{
@@ -2559,7 +2628,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2559 /* Calc sr entries for one plane configs */ 2628 /* Calc sr entries for one plane configs */
2560 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2629 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2561 /* self-refresh has much higher latency */ 2630 /* self-refresh has much higher latency */
2562 const static int sr_latency_ns = 12000; 2631 static const int sr_latency_ns = 12000;
2563 2632
2564 sr_clock = planea_clock ? planea_clock : planeb_clock; 2633 sr_clock = planea_clock ? planea_clock : planeb_clock;
2565 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2634 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2570,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2570 sr_entries = roundup(sr_entries / cacheline_size, 1); 2639 sr_entries = roundup(sr_entries / cacheline_size, 1);
2571 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2640 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2572 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2641 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2642 } else {
2643 /* Turn off self refresh if both pipes are enabled */
2644 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2645 & ~FW_BLC_SELF_EN);
2573 } 2646 }
2574 2647
2575 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", 2648 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2598,7 +2671,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2598 /* Calc sr entries for one plane configs */ 2671 /* Calc sr entries for one plane configs */
2599 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2672 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2600 /* self-refresh has much higher latency */ 2673 /* self-refresh has much higher latency */
2601 const static int sr_latency_ns = 12000; 2674 static const int sr_latency_ns = 12000;
2602 2675
2603 sr_clock = planea_clock ? planea_clock : planeb_clock; 2676 sr_clock = planea_clock ? planea_clock : planeb_clock;
2604 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2677 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2613,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2613 srwm = 1; 2686 srwm = 1;
2614 srwm &= 0x3f; 2687 srwm &= 0x3f;
2615 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2688 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2689 } else {
2690 /* Turn off self refresh if both pipes are enabled */
2691 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2692 & ~FW_BLC_SELF_EN);
2616 } 2693 }
2617 2694
2618 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2695 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2667,7 +2744,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2667 if (HAS_FW_BLC(dev) && sr_hdisplay && 2744 if (HAS_FW_BLC(dev) && sr_hdisplay &&
2668 (!planea_clock || !planeb_clock)) { 2745 (!planea_clock || !planeb_clock)) {
2669 /* self-refresh has much higher latency */ 2746 /* self-refresh has much higher latency */
2670 const static int sr_latency_ns = 6000; 2747 static const int sr_latency_ns = 6000;
2671 2748
2672 sr_clock = planea_clock ? planea_clock : planeb_clock; 2749 sr_clock = planea_clock ? planea_clock : planeb_clock;
2673 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2750 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2681,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2681 if (srwm < 0) 2758 if (srwm < 0)
2682 srwm = 1; 2759 srwm = 1;
2683 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2761 } else {
2762 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2764 & ~FW_BLC_SELF_EN);
2684 } 2765 }
2685 2766
2686 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
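
Note: all three watermark updaters (g4x, i965, i9xx) gain the same else branch, so self-refresh is explicitly turned off when both planes are active rather than being left in whatever state the last single-pipe configuration set. A simplified model, ignoring the watermark value the i9xx path also packs into the register, with the enable bit position assumed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_BLC_SELF_EN (1u << 15)   /* assumed bit position, for illustration only */

static uint32_t update_self_refresh(uint32_t fw_blc_self,
				    bool planea_active, bool planeb_active)
{
	if (planea_active && planeb_active)
		return fw_blc_self & ~FW_BLC_SELF_EN;   /* both pipes on: SR off */
	return fw_blc_self | FW_BLC_SELF_EN;            /* single pipe: SR allowed */
}

int main(void)
{
	printf("0x%08x\n", update_self_refresh(FW_BLC_SELF_EN, true, true));  /* 0x00000000 */
	printf("0x%08x\n", update_self_refresh(0, true, false));              /* 0x00008000 */
	return 0;
}
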
@@ -2906,10 +2987,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2906 return -EINVAL; 2987 return -EINVAL;
2907 } 2988 }
2908 2989
2909 if (is_lvds && limit->find_reduced_pll && 2990 if (is_lvds && dev_priv->lvds_downclock_avail) {
2910 dev_priv->lvds_downclock_avail) { 2991 has_reduced_clock = limit->find_pll(limit, crtc,
2911 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
2912 has_reduced_clock = limit->find_reduced_pll(limit, crtc,
2913 dev_priv->lvds_downclock, 2992 dev_priv->lvds_downclock,
2914 refclk, 2993 refclk,
2915 &reduced_clock); 2994 &reduced_clock);
@@ -2969,6 +3048,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2969 3048
2970 /* determine panel color depth */ 3049 /* determine panel color depth */
2971 temp = I915_READ(pipeconf_reg); 3050 temp = I915_READ(pipeconf_reg);
3051 temp &= ~PIPE_BPC_MASK;
3052 if (is_lvds) {
3053 int lvds_reg = I915_READ(PCH_LVDS);
3054 /* the BPC will be 6 if it is 18-bit LVDS panel */
3055 if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
3056 temp |= PIPE_8BPC;
3057 else
3058 temp |= PIPE_6BPC;
3059 } else if (is_edp) {
3060 switch (dev_priv->edp_bpp/3) {
3061 case 8:
3062 temp |= PIPE_8BPC;
3063 break;
3064 case 10:
3065 temp |= PIPE_10BPC;
3066 break;
3067 case 6:
3068 temp |= PIPE_6BPC;
3069 break;
3070 case 12:
3071 temp |= PIPE_12BPC;
3072 break;
3073 }
3074 } else
3075 temp |= PIPE_8BPC;
3076 I915_WRITE(pipeconf_reg, temp);
3077 I915_READ(pipeconf_reg);
2972 3078
2973 switch (temp & PIPE_BPC_MASK) { 3079 switch (temp & PIPE_BPC_MASK) {
2974 case PIPE_8BPC: 3080 case PIPE_8BPC:
@@ -3195,7 +3301,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3195 * appropriately here, but we need to look more thoroughly into how 3301 * appropriately here, but we need to look more thoroughly into how
3196 * panels behave in the two modes. 3302 * panels behave in the two modes.
3197 */ 3303 */
3198 3304 /* set the dithering flag */
3305 if (IS_I965G(dev)) {
3306 if (dev_priv->lvds_dither) {
3307 if (IS_IRONLAKE(dev))
3308 pipeconf |= PIPE_ENABLE_DITHER;
3309 else
3310 lvds |= LVDS_ENABLE_DITHER;
3311 } else {
3312 if (IS_IRONLAKE(dev))
3313 pipeconf &= ~PIPE_ENABLE_DITHER;
3314 else
3315 lvds &= ~LVDS_ENABLE_DITHER;
3316 }
3317 }
3199 I915_WRITE(lvds_reg, lvds); 3318 I915_WRITE(lvds_reg, lvds);
3200 I915_READ(lvds_reg); 3319 I915_READ(lvds_reg);
3201 } 3320 }
@@ -3385,7 +3504,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3385 3504
3386 /* we only need to pin inside GTT if cursor is non-phy */ 3505 /* we only need to pin inside GTT if cursor is non-phy */
3387 mutex_lock(&dev->struct_mutex); 3506 mutex_lock(&dev->struct_mutex);
3388 if (!dev_priv->cursor_needs_physical) { 3507 if (!dev_priv->info->cursor_needs_physical) {
3389 ret = i915_gem_object_pin(bo, PAGE_SIZE); 3508 ret = i915_gem_object_pin(bo, PAGE_SIZE);
3390 if (ret) { 3509 if (ret) {
3391 DRM_ERROR("failed to pin cursor bo\n"); 3510 DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3539,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3420 I915_WRITE(base, addr); 3539 I915_WRITE(base, addr);
3421 3540
3422 if (intel_crtc->cursor_bo) { 3541 if (intel_crtc->cursor_bo) {
3423 if (dev_priv->cursor_needs_physical) { 3542 if (dev_priv->info->cursor_needs_physical) {
3424 if (intel_crtc->cursor_bo != bo) 3543 if (intel_crtc->cursor_bo != bo)
3425 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 3544 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
3426 } else 3545 } else
@@ -3779,125 +3898,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
3779 queue_work(dev_priv->wq, &dev_priv->idle_work); 3898 queue_work(dev_priv->wq, &dev_priv->idle_work);
3780} 3899}
3781 3900
3782void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3783{
3784 drm_i915_private_t *dev_priv = dev->dev_private;
3785
3786 if (IS_IRONLAKE(dev))
3787 return;
3788
3789 if (!dev_priv->render_reclock_avail) {
3790 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3791 return;
3792 }
3793
3794 /* Restore render clock frequency to original value */
3795 if (IS_G4X(dev) || IS_I9XX(dev))
3796 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
3797 else if (IS_I85X(dev))
3798 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
3799 DRM_DEBUG_DRIVER("increasing render clock frequency\n");
3800
3801 /* Schedule downclock */
3802 if (schedule)
3803 mod_timer(&dev_priv->idle_timer, jiffies +
3804 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
3805}
3806
3807void intel_decrease_renderclock(struct drm_device *dev)
3808{
3809 drm_i915_private_t *dev_priv = dev->dev_private;
3810
3811 if (IS_IRONLAKE(dev))
3812 return;
3813
3814 if (!dev_priv->render_reclock_avail) {
3815 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3816 return;
3817 }
3818
3819 if (IS_G4X(dev)) {
3820 u16 gcfgc;
3821
3822 /* Adjust render clock... */
3823 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3824
3825 /* Down to minimum... */
3826 gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
3827 gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
3828
3829 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3830 } else if (IS_I965G(dev)) {
3831 u16 gcfgc;
3832
3833 /* Adjust render clock... */
3834 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3835
3836 /* Down to minimum... */
3837 gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
3838 gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
3839
3840 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3841 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
3842 u16 gcfgc;
3843
3844 /* Adjust render clock... */
3845 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3846
3847 /* Down to minimum... */
3848 gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
3849 gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
3850
3851 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3852 } else if (IS_I915G(dev)) {
3853 u16 gcfgc;
3854
3855 /* Adjust render clock... */
3856 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3857
3858 /* Down to minimum... */
3859 gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
3860 gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
3861
3862 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3863 } else if (IS_I85X(dev)) {
3864 u16 hpllcc;
3865
3866 /* Adjust render clock... */
3867 pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
3868
3869 /* Up to maximum... */
3870 hpllcc &= ~GC_CLOCK_CONTROL_MASK;
3871 hpllcc |= GC_CLOCK_133_200;
3872
3873 pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
3874 }
3875 DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
3876}
3877
3878/* Note that no increase function is needed for this - increase_renderclock()
3879 * will also rewrite these bits
3880 */
3881void intel_decrease_displayclock(struct drm_device *dev)
3882{
3883 if (IS_IRONLAKE(dev))
3884 return;
3885
3886 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
3887 IS_I915GM(dev)) {
3888 u16 gcfgc;
3889
3890 /* Adjust render clock... */
3891 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3892
3893 /* Down to minimum... */
3894 gcfgc &= ~0xf0;
3895 gcfgc |= 0x80;
3896
3897 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3898 }
3899}
3900
3901#define CRTC_IDLE_TIMEOUT 1000 /* ms */ 3901#define CRTC_IDLE_TIMEOUT 1000 /* ms */
3902 3902
3903static void intel_crtc_idle_timer(unsigned long arg) 3903static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +4011,6 @@ static void intel_idle_update(struct work_struct *work)
4011 4011
4012 mutex_lock(&dev->struct_mutex); 4012 mutex_lock(&dev->struct_mutex);
4013 4013
4014 /* GPU isn't processing, downclock it. */
4015 if (!dev_priv->busy) {
4016 intel_decrease_renderclock(dev);
4017 intel_decrease_displayclock(dev);
4018 }
4019
4020 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4014 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4021 /* Skip inactive CRTCs */ 4015 /* Skip inactive CRTCs */
4022 if (!crtc->fb) 4016 if (!crtc->fb)
@@ -4050,13 +4044,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4050 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4044 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4051 return; 4045 return;
4052 4046
4053 if (!dev_priv->busy) { 4047 if (!dev_priv->busy)
4054 dev_priv->busy = true; 4048 dev_priv->busy = true;
4055 intel_increase_renderclock(dev, true); 4049 else
4056 } else {
4057 mod_timer(&dev_priv->idle_timer, jiffies + 4050 mod_timer(&dev_priv->idle_timer, jiffies +
4058 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 4051 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4059 }
4060 4052
4061 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4053 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4062 if (!crtc->fb) 4054 if (!crtc->fb)
@@ -4089,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
4089struct intel_unpin_work { 4081struct intel_unpin_work {
4090 struct work_struct work; 4082 struct work_struct work;
4091 struct drm_device *dev; 4083 struct drm_device *dev;
4092 struct drm_gem_object *obj; 4084 struct drm_gem_object *old_fb_obj;
4085 struct drm_gem_object *pending_flip_obj;
4093 struct drm_pending_vblank_event *event; 4086 struct drm_pending_vblank_event *event;
4094 int pending; 4087 int pending;
4095}; 4088};
@@ -4100,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
4100 container_of(__work, struct intel_unpin_work, work); 4093 container_of(__work, struct intel_unpin_work, work);
4101 4094
4102 mutex_lock(&work->dev->struct_mutex); 4095 mutex_lock(&work->dev->struct_mutex);
4103 i915_gem_object_unpin(work->obj); 4096 i915_gem_object_unpin(work->old_fb_obj);
4104 drm_gem_object_unreference(work->obj); 4097 drm_gem_object_unreference(work->pending_flip_obj);
4098 drm_gem_object_unreference(work->old_fb_obj);
4105 mutex_unlock(&work->dev->struct_mutex); 4099 mutex_unlock(&work->dev->struct_mutex);
4106 kfree(work); 4100 kfree(work);
4107} 4101}
@@ -4124,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4124 spin_lock_irqsave(&dev->event_lock, flags); 4118 spin_lock_irqsave(&dev->event_lock, flags);
4125 work = intel_crtc->unpin_work; 4119 work = intel_crtc->unpin_work;
4126 if (work == NULL || !work->pending) { 4120 if (work == NULL || !work->pending) {
4121 if (work && !work->pending) {
4122 obj_priv = work->pending_flip_obj->driver_private;
4123 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4124 obj_priv,
4125 atomic_read(&obj_priv->pending_flip));
4126 }
4127 spin_unlock_irqrestore(&dev->event_lock, flags); 4127 spin_unlock_irqrestore(&dev->event_lock, flags);
4128 return; 4128 return;
4129 } 4129 }
@@ -4144,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4144 4144
4145 spin_unlock_irqrestore(&dev->event_lock, flags); 4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4146 4146
4147 obj_priv = work->obj->driver_private; 4147 obj_priv = work->pending_flip_obj->driver_private;
4148 if (atomic_dec_and_test(&obj_priv->pending_flip)) 4148
4149 /* Initial scanout buffer will have a 0 pending flip count */
4150 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4151 atomic_dec_and_test(&obj_priv->pending_flip))
4149 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4152 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4150 schedule_work(&work->work); 4153 schedule_work(&work->work);
4151} 4154}
@@ -4158,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
4158 unsigned long flags; 4161 unsigned long flags;
4159 4162
4160 spin_lock_irqsave(&dev->event_lock, flags); 4163 spin_lock_irqsave(&dev->event_lock, flags);
4161 if (intel_crtc->unpin_work) 4164 if (intel_crtc->unpin_work) {
4162 intel_crtc->unpin_work->pending = 1; 4165 intel_crtc->unpin_work->pending = 1;
4166 } else {
4167 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4168 }
4163 spin_unlock_irqrestore(&dev->event_lock, flags); 4169 spin_unlock_irqrestore(&dev->event_lock, flags);
4164} 4170}
4165 4171
@@ -4175,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4175 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4176 struct intel_unpin_work *work; 4182 struct intel_unpin_work *work;
4177 unsigned long flags; 4183 unsigned long flags;
4178 int ret; 4184 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4185 int ret, pipesrc;
4179 RING_LOCALS; 4186 RING_LOCALS;
4180 4187
4181 work = kzalloc(sizeof *work, GFP_KERNEL); 4188 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4187,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4187 work->event = event; 4194 work->event = event;
4188 work->dev = crtc->dev; 4195 work->dev = crtc->dev;
4189 intel_fb = to_intel_framebuffer(crtc->fb); 4196 intel_fb = to_intel_framebuffer(crtc->fb);
4190 work->obj = intel_fb->obj; 4197 work->old_fb_obj = intel_fb->obj;
4191 INIT_WORK(&work->work, intel_unpin_work_fn); 4198 INIT_WORK(&work->work, intel_unpin_work_fn);
4192 4199
4193 /* We borrow the event spin lock for protecting unpin_work */ 4200 /* We borrow the event spin lock for protecting unpin_work */
4194 spin_lock_irqsave(&dev->event_lock, flags); 4201 spin_lock_irqsave(&dev->event_lock, flags);
4195 if (intel_crtc->unpin_work) { 4202 if (intel_crtc->unpin_work) {
4203 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4196 spin_unlock_irqrestore(&dev->event_lock, flags); 4204 spin_unlock_irqrestore(&dev->event_lock, flags);
4197 kfree(work); 4205 kfree(work);
4198 mutex_unlock(&dev->struct_mutex); 4206 mutex_unlock(&dev->struct_mutex);
@@ -4206,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4206 4214
4207 ret = intel_pin_and_fence_fb_obj(dev, obj); 4215 ret = intel_pin_and_fence_fb_obj(dev, obj);
4208 if (ret != 0) { 4216 if (ret != 0) {
4217 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4218 obj->driver_private);
4209 kfree(work); 4219 kfree(work);
4220 intel_crtc->unpin_work = NULL;
4210 mutex_unlock(&dev->struct_mutex); 4221 mutex_unlock(&dev->struct_mutex);
4211 return ret; 4222 return ret;
4212 } 4223 }
4213 4224
4214 /* Reference the old fb object for the scheduled work. */ 4225 /* Reference the objects for the scheduled work. */
4215 drm_gem_object_reference(work->obj); 4226 drm_gem_object_reference(work->old_fb_obj);
4227 drm_gem_object_reference(obj);
4216 4228
4217 crtc->fb = fb; 4229 crtc->fb = fb;
4218 i915_gem_object_flush_write_domain(obj); 4230 i915_gem_object_flush_write_domain(obj);
4219 drm_vblank_get(dev, intel_crtc->pipe); 4231 drm_vblank_get(dev, intel_crtc->pipe);
4220 obj_priv = obj->driver_private; 4232 obj_priv = obj->driver_private;
4221 atomic_inc(&obj_priv->pending_flip); 4233 atomic_inc(&obj_priv->pending_flip);
4234 work->pending_flip_obj = obj;
4222 4235
4223 BEGIN_LP_RING(4); 4236 BEGIN_LP_RING(4);
4224 OUT_RING(MI_DISPLAY_FLIP | 4237 OUT_RING(MI_DISPLAY_FLIP |
@@ -4226,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4226 OUT_RING(fb->pitch); 4239 OUT_RING(fb->pitch);
4227 if (IS_I965G(dev)) { 4240 if (IS_I965G(dev)) {
4228 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4241 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4229 OUT_RING((fb->width << 16) | fb->height); 4242 pipesrc = I915_READ(pipesrc_reg);
4243 OUT_RING(pipesrc & 0x0fff0fff);
4230 } else { 4244 } else {
4231 OUT_RING(obj_priv->gtt_offset); 4245 OUT_RING(obj_priv->gtt_offset);
4232 OUT_RING(MI_NOOP); 4246 OUT_RING(MI_NOOP);
@@ -4400,29 +4414,43 @@ static void intel_setup_outputs(struct drm_device *dev)
4400 bool found = false; 4414 bool found = false;
4401 4415
4402 if (I915_READ(SDVOB) & SDVO_DETECTED) { 4416 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4417 DRM_DEBUG_KMS("probing SDVOB\n");
4403 found = intel_sdvo_init(dev, SDVOB); 4418 found = intel_sdvo_init(dev, SDVOB);
4404 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 4419 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
4420 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
4405 intel_hdmi_init(dev, SDVOB); 4421 intel_hdmi_init(dev, SDVOB);
4422 }
4406 4423
4407 if (!found && SUPPORTS_INTEGRATED_DP(dev)) 4424 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
4425 DRM_DEBUG_KMS("probing DP_B\n");
4408 intel_dp_init(dev, DP_B); 4426 intel_dp_init(dev, DP_B);
4427 }
4409 } 4428 }
4410 4429
4411 /* Before G4X SDVOC doesn't have its own detect register */ 4430 /* Before G4X SDVOC doesn't have its own detect register */
4412 4431
4413 if (I915_READ(SDVOB) & SDVO_DETECTED) 4432 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4433 DRM_DEBUG_KMS("probing SDVOC\n");
4414 found = intel_sdvo_init(dev, SDVOC); 4434 found = intel_sdvo_init(dev, SDVOC);
4435 }
4415 4436
4416 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 4437 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
4417 4438
4418 if (SUPPORTS_INTEGRATED_HDMI(dev)) 4439 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
4440 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
4419 intel_hdmi_init(dev, SDVOC); 4441 intel_hdmi_init(dev, SDVOC);
4420 if (SUPPORTS_INTEGRATED_DP(dev)) 4442 }
4443 if (SUPPORTS_INTEGRATED_DP(dev)) {
4444 DRM_DEBUG_KMS("probing DP_C\n");
4421 intel_dp_init(dev, DP_C); 4445 intel_dp_init(dev, DP_C);
4446 }
4422 } 4447 }
4423 4448
4424 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) 4449 if (SUPPORTS_INTEGRATED_DP(dev) &&
4450 (I915_READ(DP_D) & DP_DETECTED)) {
4451 DRM_DEBUG_KMS("probing DP_D\n");
4425 intel_dp_init(dev, DP_D); 4452 intel_dp_init(dev, DP_D);
4453 }
4426 } else if (IS_I8XX(dev)) 4454 } else if (IS_I8XX(dev))
4427 intel_dvo_init(dev); 4455 intel_dvo_init(dev);
4428 4456
@@ -4527,6 +4555,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
4527 .fb_changed = intelfb_probe, 4555 .fb_changed = intelfb_probe,
4528}; 4556};
4529 4557
4558static struct drm_gem_object *
4559intel_alloc_power_context(struct drm_device *dev)
4560{
4561 struct drm_gem_object *pwrctx;
4562 int ret;
4563
4564 pwrctx = drm_gem_object_alloc(dev, 4096);
4565 if (!pwrctx) {
4566 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
4567 return NULL;
4568 }
4569
4570 mutex_lock(&dev->struct_mutex);
4571 ret = i915_gem_object_pin(pwrctx, 4096);
4572 if (ret) {
4573 DRM_ERROR("failed to pin power context: %d\n", ret);
4574 goto err_unref;
4575 }
4576
4577 ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4578 if (ret) {
4579 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
4580 goto err_unpin;
4581 }
4582 mutex_unlock(&dev->struct_mutex);
4583
4584 return pwrctx;
4585
4586err_unpin:
4587 i915_gem_object_unpin(pwrctx);
4588err_unref:
4589 drm_gem_object_unreference(pwrctx);
4590 mutex_unlock(&dev->struct_mutex);
4591 return NULL;
4592}
4593
4530void intel_init_clock_gating(struct drm_device *dev) 4594void intel_init_clock_gating(struct drm_device *dev)
4531{ 4595{
4532 struct drm_i915_private *dev_priv = dev->dev_private; 4596 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4643,27 @@ void intel_init_clock_gating(struct drm_device *dev)
4579 * GPU can automatically power down the render unit if given a page 4643 * GPU can automatically power down the render unit if given a page
4580 * to save state. 4644 * to save state.
4581 */ 4645 */
4582 if (I915_HAS_RC6(dev)) { 4646 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
4583 struct drm_gem_object *pwrctx; 4647 struct drm_i915_gem_object *obj_priv = NULL;
4584 struct drm_i915_gem_object *obj_priv;
4585 int ret;
4586 4648
4587 if (dev_priv->pwrctx) { 4649 if (dev_priv->pwrctx) {
4588 obj_priv = dev_priv->pwrctx->driver_private; 4650 obj_priv = dev_priv->pwrctx->driver_private;
4589 } else { 4651 } else {
4590 pwrctx = drm_gem_object_alloc(dev, 4096); 4652 struct drm_gem_object *pwrctx;
4591 if (!pwrctx) {
4592 DRM_DEBUG("failed to alloc power context, "
4593 "RC6 disabled\n");
4594 goto out;
4595 }
4596 4653
4597 ret = i915_gem_object_pin(pwrctx, 4096); 4654 pwrctx = intel_alloc_power_context(dev);
4598 if (ret) { 4655 if (pwrctx) {
4599 DRM_ERROR("failed to pin power context: %d\n", 4656 dev_priv->pwrctx = pwrctx;
4600 ret); 4657 obj_priv = pwrctx->driver_private;
4601 drm_gem_object_unreference(pwrctx);
4602 goto out;
4603 } 4658 }
4604
4605 i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4606
4607 dev_priv->pwrctx = pwrctx;
4608 obj_priv = pwrctx->driver_private;
4609 } 4659 }
4610 4660
4611 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); 4661 if (obj_priv) {
4612 I915_WRITE(MCHBAR_RENDER_STANDBY, 4662 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
4613 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); 4663 I915_WRITE(MCHBAR_RENDER_STANDBY,
4664 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
4665 }
4614 } 4666 }
4615
4616out:
4617 return;
4618} 4667}
4619 4668
4620/* Set up chip specific display functions */ 4669/* Set up chip specific display functions */
@@ -4770,7 +4819,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
4770 del_timer_sync(&intel_crtc->idle_timer); 4819 del_timer_sync(&intel_crtc->idle_timer);
4771 } 4820 }
4772 4821
4773 intel_increase_renderclock(dev, false);
4774 del_timer_sync(&dev_priv->idle_timer); 4822 del_timer_sync(&dev_priv->idle_timer);
4775 4823
4776 if (dev_priv->display.disable_fbc) 4824 if (dev_priv->display.disable_fbc)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4e7aa8b7b938..439506cefc14 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw)
125 125
126/* I think this is a fiction */ 126/* I think this is a fiction */
127static int 127static int
128intel_dp_link_required(int pixel_clock) 128intel_dp_link_required(struct drm_device *dev,
129 struct intel_output *intel_output, int pixel_clock)
129{ 130{
130 return pixel_clock * 3; 131 struct drm_i915_private *dev_priv = dev->dev_private;
132
133 if (IS_eDP(intel_output))
134 return (pixel_clock * dev_priv->edp_bpp) / 8;
135 else
136 return pixel_clock * 3;
131} 137}
132 138
133static int 139static int
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
138 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); 144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
139 int max_lanes = intel_dp_max_lane_count(intel_output); 145 int max_lanes = intel_dp_max_lane_count(intel_output);
140 146
141 if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) 147 if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
148 > max_link_clock * max_lanes)
142 return MODE_CLOCK_HIGH; 149 return MODE_CLOCK_HIGH;
143 150
144 if (mode->clock < 10000) 151 if (mode->clock < 10000)
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
492 for (clock = 0; clock <= max_clock; clock++) { 499 for (clock = 0; clock <= max_clock; clock++) {
493 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 500 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
494 501
495 if (intel_dp_link_required(mode->clock) <= link_avail) { 502 if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
503 <= link_avail) {
496 dp_priv->link_bw = bws[clock]; 504 dp_priv->link_bw = bws[clock];
497 dp_priv->lane_count = lane_count; 505 dp_priv->lane_count = lane_count;
498 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 506 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output)
1289 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1297 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1290 intel_dp_check_link_status(intel_output); 1298 intel_dp_check_link_status(intel_output);
1291} 1299}
1292/* 1300
1293 * Enumerate the child dev array parsed from VBT to check whether
1294 * the given DP is present.
1295 * If it is present, return 1.
1296 * If it is not present, return false.
1297 * If no child dev is parsed from VBT, it is assumed that the given
1298 * DP is present.
1299 */
1300static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
1301{
1302 struct drm_i915_private *dev_priv = dev->dev_private;
1303 struct child_device_config *p_child;
1304 int i, dp_port, ret;
1305
1306 if (!dev_priv->child_dev_num)
1307 return 1;
1308
1309 dp_port = 0;
1310 if (dp_reg == DP_B || dp_reg == PCH_DP_B)
1311 dp_port = PORT_IDPB;
1312 else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
1313 dp_port = PORT_IDPC;
1314 else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
1315 dp_port = PORT_IDPD;
1316
1317 ret = 0;
1318 for (i = 0; i < dev_priv->child_dev_num; i++) {
1319 p_child = dev_priv->child_dev + i;
1320 /*
1321 * If the device type is not DP, continue.
1322 */
1323 if (p_child->device_type != DEVICE_TYPE_DP &&
1324 p_child->device_type != DEVICE_TYPE_eDP)
1325 continue;
1326 /* Find the eDP port */
1327 if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
1328 ret = 1;
1329 break;
1330 }
1331 /* Find the DP port */
1332 if (p_child->dvo_port == dp_port) {
1333 ret = 1;
1334 break;
1335 }
1336 }
1337 return ret;
1338}
1339void 1301void
1340intel_dp_init(struct drm_device *dev, int output_reg) 1302intel_dp_init(struct drm_device *dev, int output_reg)
1341{ 1303{
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1345 struct intel_dp_priv *dp_priv; 1307 struct intel_dp_priv *dp_priv;
1346 const char *name = NULL; 1308 const char *name = NULL;
1347 1309
1348 if (!dp_is_present_in_vbt(dev, output_reg)) {
1349 DRM_DEBUG_KMS("DP is not present. Ignore it\n");
1350 return;
1351 }
1352 intel_output = kcalloc(sizeof(struct intel_output) + 1310 intel_output = kcalloc(sizeof(struct intel_output) +
1353 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1311 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1354 if (!intel_output) 1312 if (!intel_output)
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1373 else if (output_reg == DP_D || output_reg == PCH_DP_D) 1331 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1374 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1332 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1375 1333
1376 if (IS_eDP(intel_output)) { 1334 if (IS_eDP(intel_output))
1377 intel_output->crtc_mask = (1 << 1);
1378 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1335 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1379 } else 1336
1380 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1337 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1381 connector->interlace_allowed = true; 1338 connector->interlace_allowed = true;
1382 connector->doublescan_allowed = 0; 1339 connector->doublescan_allowed = 0;
1383 1340
@@ -1402,14 +1359,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1402 break; 1359 break;
1403 case DP_B: 1360 case DP_B:
1404 case PCH_DP_B: 1361 case PCH_DP_B:
1362 dev_priv->hotplug_supported_mask |=
1363 HDMIB_HOTPLUG_INT_STATUS;
1405 name = "DPDDC-B"; 1364 name = "DPDDC-B";
1406 break; 1365 break;
1407 case DP_C: 1366 case DP_C:
1408 case PCH_DP_C: 1367 case PCH_DP_C:
1368 dev_priv->hotplug_supported_mask |=
1369 HDMIC_HOTPLUG_INT_STATUS;
1409 name = "DPDDC-C"; 1370 name = "DPDDC-C";
1410 break; 1371 break;
1411 case DP_D: 1372 case DP_D:
1412 case PCH_DP_D: 1373 case PCH_DP_D:
1374 dev_priv->hotplug_supported_mask |=
1375 HDMID_HOTPLUG_INT_STATUS;
1413 name = "DPDDC-D"; 1376 name = "DPDDC-D";
1414 break; 1377 break;
1415 } 1378 }
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f04dbbe7d400..0e268deed761 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
225 .destroy = intel_hdmi_enc_destroy, 225 .destroy = intel_hdmi_enc_destroy,
226}; 226};
227 227
228/*
229 * Enumerate the child dev array parsed from VBT to check whether
230 * the given HDMI is present.
231 * If it is present, return 1.
232 * If it is not present, return false.
233 * If no child dev is parsed from VBT, it assumes that the given
234 * HDMI is present.
235 */
236static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
237{
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct child_device_config *p_child;
240 int i, hdmi_port, ret;
241
242 if (!dev_priv->child_dev_num)
243 return 1;
244
245 if (hdmi_reg == SDVOB)
246 hdmi_port = DVO_B;
247 else if (hdmi_reg == SDVOC)
248 hdmi_port = DVO_C;
249 else if (hdmi_reg == HDMIB)
250 hdmi_port = DVO_B;
251 else if (hdmi_reg == HDMIC)
252 hdmi_port = DVO_C;
253 else if (hdmi_reg == HDMID)
254 hdmi_port = DVO_D;
255 else
256 return 0;
257
258 ret = 0;
259 for (i = 0; i < dev_priv->child_dev_num; i++) {
260 p_child = dev_priv->child_dev + i;
261 /*
262 * If the device type is not HDMI, continue.
263 */
264 if (p_child->device_type != DEVICE_TYPE_HDMI)
265 continue;
266 /* Find the HDMI port */
267 if (p_child->dvo_port == hdmi_port) {
268 ret = 1;
269 break;
270 }
271 }
272 return ret;
273}
274void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 228void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
275{ 229{
276 struct drm_i915_private *dev_priv = dev->dev_private; 230 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
278 struct intel_output *intel_output; 232 struct intel_output *intel_output;
279 struct intel_hdmi_priv *hdmi_priv; 233 struct intel_hdmi_priv *hdmi_priv;
280 234
281 if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
282 DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
283 return;
284 }
285 intel_output = kcalloc(sizeof(struct intel_output) + 235 intel_output = kcalloc(sizeof(struct intel_output) +
286 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 236 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
287 if (!intel_output) 237 if (!intel_output)
@@ -303,21 +253,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
303 if (sdvox_reg == SDVOB) { 253 if (sdvox_reg == SDVOB) {
304 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 254 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
305 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 255 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
256 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
306 } else if (sdvox_reg == SDVOC) { 257 } else if (sdvox_reg == SDVOC) {
307 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 258 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
308 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 259 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
260 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
309 } else if (sdvox_reg == HDMIB) { 261 } else if (sdvox_reg == HDMIB) {
310 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 262 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
311 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 263 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
312 "HDMIB"); 264 "HDMIB");
265 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
313 } else if (sdvox_reg == HDMIC) { 266 } else if (sdvox_reg == HDMIC) {
314 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 267 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
315 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 268 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
316 "HDMIC"); 269 "HDMIC");
270 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
317 } else if (sdvox_reg == HDMID) { 271 } else if (sdvox_reg == HDMID) {
318 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 272 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
319 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 273 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
320 "HDMID"); 274 "HDMID");
275 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
321 } 276 }
322 if (!intel_output->ddc_bus) 277 if (!intel_output->ddc_bus)
323 goto err_connector; 278 goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3118ce274e67..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -602,12 +602,47 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
602/* Some lid devices report incorrect lid status, assume they're connected */ 602/* Some lid devices report incorrect lid status, assume they're connected */
603static const struct dmi_system_id bad_lid_status[] = { 603static const struct dmi_system_id bad_lid_status[] = {
604 { 604 {
605 .ident = "Compaq nx9020",
606 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
608 DMI_MATCH(DMI_BOARD_NAME, "3084"),
609 },
610 },
611 {
612 .ident = "Samsung SX20S",
613 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
616 },
617 },
618 {
605 .ident = "Aspire One", 619 .ident = "Aspire One",
606 .matches = { 620 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 621 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 622 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
609 }, 623 },
610 }, 624 },
625 {
626 .ident = "Aspire 1810T",
627 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
630 },
631 },
632 {
633 .ident = "PC-81005",
634 .matches = {
635 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
637 },
638 },
639 {
640 .ident = "Clevo M5x0N",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
643 DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
644 },
645 },
611 { } 646 { }
612}; 647};
613 648
@@ -622,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
622{ 657{
623 enum drm_connector_status status = connector_status_connected; 658 enum drm_connector_status status = connector_status_connected;
624 659
625 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
626 status = connector_status_disconnected; 661 status = connector_status_disconnected;
627 662
628 return status; 663 return status;
@@ -679,7 +714,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
679 struct drm_i915_private *dev_priv = 714 struct drm_i915_private *dev_priv =
680 container_of(nb, struct drm_i915_private, lid_notifier); 715 container_of(nb, struct drm_i915_private, lid_notifier);
681 struct drm_device *dev = dev_priv->dev; 716 struct drm_device *dev = dev_priv->dev;
717 struct drm_connector *connector = dev_priv->int_lvds_connector;
682 718
719 /*
720 * check and update the status of LVDS connector after receiving
721 * the LID notification event.
722 */
723 if (connector)
724 connector->status = connector->funcs->detect(connector);
683 if (!acpi_lid_open()) { 725 if (!acpi_lid_open()) {
684 dev_priv->modeset_on_lid = 1; 726 dev_priv->modeset_on_lid = 1;
685 return NOTIFY_OK; 727 return NOTIFY_OK;
@@ -854,65 +896,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
854 { } /* terminating entry */ 896 { } /* terminating entry */
855}; 897};
856 898
857#ifdef CONFIG_ACPI
858/*
859 * check_lid_device -- check whether @handle is an ACPI LID device.
860 * @handle: ACPI device handle
861 * @level : depth in the ACPI namespace tree
862 * @context: the number of LID device when we find the device
863 * @rv: a return value to fill if desired (Not use)
864 */
865static acpi_status
866check_lid_device(acpi_handle handle, u32 level, void *context,
867 void **return_value)
868{
869 struct acpi_device *acpi_dev;
870 int *lid_present = context;
871
872 acpi_dev = NULL;
873 /* Get the acpi device for device handle */
874 if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
875 /* If there is no ACPI device for handle, return */
876 return AE_OK;
877 }
878
879 if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
880 *lid_present = 1;
881
882 return AE_OK;
883}
884
885/**
886 * check whether there exists the ACPI LID device by enumerating the ACPI
887 * device tree.
888 */
889static int intel_lid_present(void)
890{
891 int lid_present = 0;
892
893 if (acpi_disabled) {
894 /* If ACPI is disabled, there is no ACPI device tree to
895 * check, so assume the LID device would have been present.
896 */
897 return 1;
898 }
899
900 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
901 ACPI_UINT32_MAX,
902 check_lid_device, NULL, &lid_present, NULL);
903
904 return lid_present;
905}
906#else
907static int intel_lid_present(void)
908{
909 /* In the absence of ACPI built in, assume that the LID device would
910 * have been present.
911 */
912 return 1;
913}
914#endif
915
916/** 899/**
917 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID 900 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
918 * @dev: drm device 901 * @dev: drm device
@@ -957,7 +940,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
957 } 940 }
958 } 941 }
959 mutex_unlock(&dev->mode_config.mutex); 942 mutex_unlock(&dev->mode_config.mutex);
960 if (temp_downclock < panel_fixed_mode->clock) { 943 if (temp_downclock < panel_fixed_mode->clock &&
944 i915_lvds_downclock) {
961 /* We found the downclock for LVDS. */ 945 /* We found the downclock for LVDS. */
962 dev_priv->lvds_downclock_avail = 1; 946 dev_priv->lvds_downclock_avail = 1;
963 dev_priv->lvds_downclock = temp_downclock; 947 dev_priv->lvds_downclock = temp_downclock;
@@ -1031,12 +1015,8 @@ void intel_lvds_init(struct drm_device *dev)
1031 if (dmi_check_system(intel_no_lvds)) 1015 if (dmi_check_system(intel_no_lvds))
1032 return; 1016 return;
1033 1017
1034 /* 1018 if (!lvds_is_present_in_vbt(dev)) {
1035 * Assume LVDS is present if there's an ACPI lid device or if the 1019 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
1036 * device is present in the VBT.
1037 */
1038 if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
1039 DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
1040 return; 1020 return;
1041 } 1021 }
1042 1022
@@ -1180,6 +1160,8 @@ out:
1180 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1160 DRM_DEBUG_KMS("lid notifier registration failed\n");
1181 dev_priv->lid_notifier.notifier_call = NULL; 1161 dev_priv->lid_notifier.notifier_call = NULL;
1182 } 1162 }
1163 /* keep the LVDS connector */
1164 dev_priv->int_lvds_connector = connector;
1183 drm_sysfs_connector_add(connector); 1165 drm_sysfs_connector_add(connector);
1184 return; 1166 return;
1185 1167
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 24a3dc99716c..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
462} 462}
463 463
464/** 464/**
465 * Don't check status code from this as it switches the bus back to the 465 * Try to read the response after issuing the DDC switch command. But it
466 * SDVO chips which defeats the purpose of doing a bus switch in the first 466 * is noted that we must read the response and issue the DDC
467 * place. 467 * switch command in one I2C transaction. Otherwise when we try to start
468 * another I2C transaction after issuing the DDC bus switch, it will be
469 * switched to the internal SDVO register.
468 */ 470 */
469static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, 471static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
470 u8 target) 472 u8 target)
471{ 473{
472 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); 474 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
475 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
476 struct i2c_msg msgs[] = {
477 {
478 .addr = sdvo_priv->slave_addr >> 1,
479 .flags = 0,
480 .len = 2,
481 .buf = out_buf,
482 },
483 /* the following two are to read the response */
484 {
485 .addr = sdvo_priv->slave_addr >> 1,
486 .flags = 0,
487 .len = 1,
488 .buf = cmd_buf,
489 },
490 {
491 .addr = sdvo_priv->slave_addr >> 1,
492 .flags = I2C_M_RD,
493 .len = 1,
494 .buf = ret_value,
495 },
496 };
497
498 intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
499 &target, 1);
500 /* write the DDC switch command argument */
501 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
502
503 out_buf[0] = SDVO_I2C_OPCODE;
504 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
505 cmd_buf[0] = SDVO_I2C_CMD_STATUS;
506 cmd_buf[1] = 0;
507 ret_value[0] = 0;
508 ret_value[1] = 0;
509
510 ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
511 if (ret != 3) {
512 /* failure in I2C transfer */
513 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
514 return;
515 }
516 if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
517 DRM_DEBUG_KMS("DDC switch command returns response %d\n",
518 ret_value[0]);
519 return;
520 }
521 return;
473} 522}
474 523
475static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 524static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1579 edid = drm_get_edid(&intel_output->base, 1628 edid = drm_get_edid(&intel_output->base,
1580 intel_output->ddc_bus); 1629 intel_output->ddc_bus);
1581 1630
1631 /* This is only applied to SDVO cards with multiple outputs */
1632 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
1633 uint8_t saved_ddc, temp_ddc;
1634 saved_ddc = sdvo_priv->ddc_bus;
1635 temp_ddc = sdvo_priv->ddc_bus >> 1;
1636 /*
1637 * Don't use 1 as the argument of the DDC bus switch to get
1638 * the EDID; it is used for the SDVO SPD ROM.
1639 */
1640 while(temp_ddc > 1) {
1641 sdvo_priv->ddc_bus = temp_ddc;
1642 edid = drm_get_edid(&intel_output->base,
1643 intel_output->ddc_bus);
1644 if (edid) {
1645 /*
1646 * When we can get the EDID, maybe it is the
1647 * correct DDC bus. Update it.
1648 */
1649 sdvo_priv->ddc_bus = temp_ddc;
1650 break;
1651 }
1652 temp_ddc >>= 1;
1653 }
1654 if (edid == NULL)
1655 sdvo_priv->ddc_bus = saved_ddc;
1656 }
1582 /* when there is no edid and no monitor is connected with VGA 1657 /* when there is no edid and no monitor is connected with VGA
1583 * port, try to use the CRT ddc to read the EDID for DVI-connector 1658 * port, try to use the CRT ddc to read the EDID for DVI-connector
1584 */ 1659 */
@@ -2270,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2270 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2271 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2272 (1 << INTEL_ANALOG_CLONE_BIT); 2347 (1 << INTEL_ANALOG_CLONE_BIT);
2348 } else if (flags & SDVO_OUTPUT_CVBS0) {
2349
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2273 } else if (flags & SDVO_OUTPUT_LVDS0) { 2356 } else if (flags & SDVO_OUTPUT_LVDS0) {
2274 2357
2275 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2662,6 +2745,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2662 2745
2663bool intel_sdvo_init(struct drm_device *dev, int output_device) 2746bool intel_sdvo_init(struct drm_device *dev, int output_device)
2664{ 2747{
2748 struct drm_i915_private *dev_priv = dev->dev_private;
2665 struct drm_connector *connector; 2749 struct drm_connector *connector;
2666 struct intel_output *intel_output; 2750 struct intel_output *intel_output;
2667 struct intel_sdvo_priv *sdvo_priv; 2751 struct intel_sdvo_priv *sdvo_priv;
@@ -2708,10 +2792,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2708 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2792 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2709 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2793 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2710 "SDVOB/VGA DDC BUS"); 2794 "SDVOB/VGA DDC BUS");
2795 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2711 } else { 2796 } else {
2712 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2797 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2713 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2798 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2714 "SDVOC/VGA DDC BUS"); 2799 "SDVOC/VGA DDC BUS");
2800 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2715 } 2801 }
2716 2802
2717 if (intel_output->ddc_bus == NULL) 2803 if (intel_output->ddc_bus == NULL)
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index b1bc1ea182b8..1175429da102 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG
30 via debugfs. 30 via debugfs.
31 31
32menu "I2C encoder or helper chips" 32menu "I2C encoder or helper chips"
33 depends on DRM && I2C 33 depends on DRM && DRM_KMS_HELPER && I2C
34 34
35config DRM_I2C_CH7006 35config DRM_I2C_CH7006
36 tristate "Chrontel ch7006 TV encoder" 36 tristate "Chrontel ch7006 TV encoder"
37 depends on DRM_NOUVEAU 37 default m if DRM_NOUVEAU
38 default m
39 help 38 help
40 Support for Chrontel ch7006 and similar TV encoders, found 39 Support for Chrontel ch7006 and similar TV encoders, found
41 on some nVidia video cards. 40 on some nVidia video cards.
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
90{ 90{
91 int result; 91 int result;
92 92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, 93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
94 &result)) 94 &result))
95 return -ENODEV; 95 return -ENODEV;
96 96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98 98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */ 99 if (result) { /* Ensure that the external GPU is enabled */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
102 NULL);
103 } else { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL); 105 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL); 107 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 } 108 }
109 109
110 return 0; 110 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index ba143972769f..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
310 struct drm_device *dev = bios->dev; 310 struct drm_device *dev = bios->dev;
311 311
312 /* C51 has misaligned regs on purpose. Marvellous */ 312 /* C51 has misaligned regs on purpose. Marvellous */
313 if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) { 313 if (reg & 0x2 ||
314 NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n", 314 (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
315 reg); 315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
316 return 0; 316
317 } 317 /* warn on C51 regs that haven't been verified accessible in tracing */
318 /*
319 * Warn on C51 regs that have not been verified accessible in
320 * mmiotracing
321 */
322 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && 318 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
323 reg != 0x130d && reg != 0x1311 && reg != 0x60081d) 319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
324 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", 320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
325 reg); 321 reg);
326 322
327 /* Trust the init scripts on G80 */ 323 if (reg >= (8*1024*1024)) {
328 if (dev_priv->card_type >= NV_50) 324 NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
329 return 1; 325 return 0;
330
331 #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
332 if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
333 return 1;
334 if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
335 return 1;
336 if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
337 return 1;
338 if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
339 (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
340 return 1;
341 if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
342 WITHIN(reg, 0xc000, 0x48))
343 return 1;
344 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
345 return 1;
346 if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
347 if (reg == 0x00011014 || reg == 0x00020328)
348 return 1;
349 if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
350 return 1;
351 } 326 }
352 if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
353 return 1;
354 if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
355 return 1;
356 if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
357 return 1;
358 if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
359 return 1;
360 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
361 return 1;
362 if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
363 WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
364 return 1;
365 #undef WITHIN
366
367 NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
368 327
369 return 0; 328 return 1;
370} 329}
371 330
372static bool 331static bool
@@ -1906,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1906 1865
1907 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1908 1867
1909 if (dev_priv->card_type >= NV_50) 1868 if (dev_priv->card_type >= NV_40)
1910 return 1; 1869 return 1;
1911 1870
1912 /* 1871 /*
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
3196 } 3155 }
3197#ifdef __powerpc__ 3156#ifdef __powerpc__
3198 /* Powerbook specific quirks */ 3157 /* Powerbook specific quirks */
3199 if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329)) 3158 if ((dev->pci_device & 0xffff) == 0x0179 ||
3200 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); 3159 (dev->pci_device & 0xffff) == 0x0189 ||
3201 if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) { 3160 (dev->pci_device & 0xffff) == 0x0329) {
3202 if (script == LVDS_PANEL_ON) { 3161 if (script == LVDS_RESET) {
3203 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31)); 3162 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3204 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); 3163
3205 } 3164 } else if (script == LVDS_PANEL_ON) {
3206 if (script == LVDS_PANEL_OFF) { 3165 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3207 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31)); 3166 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3208 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); 3167 | (1 << 31));
3168 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3169 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3170
3171 } else if (script == LVDS_PANEL_OFF) {
3172 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3173 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3174 & ~(1 << 31));
3175 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3176 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3209 } 3177 }
3210 } 3178 }
3211#endif 3179#endif
@@ -3797,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3797 */ 3765 */
3798 3766
3799 struct drm_nouveau_private *dev_priv = dev->dev_private; 3767 struct drm_nouveau_private *dev_priv = dev->dev_private;
3800 struct init_exec iexec = {true, false};
3801 struct nvbios *bios = &dev_priv->VBIOS; 3768 struct nvbios *bios = &dev_priv->VBIOS;
3802 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3769 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3803 uint8_t *otable = NULL; 3770 uint8_t *otable = NULL;
@@ -3877,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3877 } 3844 }
3878 } 3845 }
3879 3846
3880 bios->display.output = dcbent;
3881
3882 if (pxclk == 0) { 3847 if (pxclk == 0) {
3883 script = ROM16(otable[6]); 3848 script = ROM16(otable[6]);
3884 if (!script) { 3849 if (!script) {
@@ -3887,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3887 } 3852 }
3888 3853
3889 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); 3854 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
3890 parse_init_table(bios, script, &iexec); 3855 nouveau_bios_run_init_table(dev, script, dcbent);
3891 } else 3856 } else
3892 if (pxclk == -1) { 3857 if (pxclk == -1) {
3893 script = ROM16(otable[8]); 3858 script = ROM16(otable[8]);
@@ -3897,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3897 } 3862 }
3898 3863
3899 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); 3864 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
3900 parse_init_table(bios, script, &iexec); 3865 nouveau_bios_run_init_table(dev, script, dcbent);
3901 } else 3866 } else
3902 if (pxclk == -2) { 3867 if (pxclk == -2) {
3903 if (table[4] >= 12) 3868 if (table[4] >= 12)
@@ -3910,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3910 } 3875 }
3911 3876
3912 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); 3877 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
3913 parse_init_table(bios, script, &iexec); 3878 nouveau_bios_run_init_table(dev, script, dcbent);
3914 } else 3879 } else
3915 if (pxclk > 0) { 3880 if (pxclk > 0) {
3916 script = ROM16(otable[table[4] + i*6 + 2]); 3881 script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3922,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3922 } 3887 }
3923 3888
3924 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); 3889 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
3925 parse_init_table(bios, script, &iexec); 3890 nouveau_bios_run_init_table(dev, script, dcbent);
3926 } else 3891 } else
3927 if (pxclk < 0) { 3892 if (pxclk < 0) {
3928 script = ROM16(otable[table[4] + i*6 + 4]); 3893 script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3934,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3934 } 3899 }
3935 3900
3936 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); 3901 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
3937 parse_init_table(bios, script, &iexec); 3902 nouveau_bios_run_init_table(dev, script, dcbent);
3938 } 3903 }
3939 3904
3940 return 0; 3905 return 0;
@@ -5434,52 +5399,49 @@ static bool
5434parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, 5399parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5435 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 5400 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5436{ 5401{
5437 if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 && 5402 switch (conn & 0x0000000f) {
5438 conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 && 5403 case 0:
5439 conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 && 5404 entry->type = OUTPUT_ANALOG;
5440 conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 && 5405 break;
5441 conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 && 5406 case 1:
5442 conn != 0xf2205004 && conn != 0xf2209004) { 5407 entry->type = OUTPUT_TV;
5443 NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n"); 5408 break;
5444 5409 case 2:
5445 /* cause output setting to fail for !TV, so message is seen */ 5410 case 3:
5446 if ((conn & 0xf) != 0x1)
5447 dcb->entries = 0;
5448
5449 return false;
5450 }
5451 /* most of the below is a "best guess" atm */
5452 entry->type = conn & 0xf;
5453 if (entry->type == 2)
5454 /* another way of specifying straps based lvds... */
5455 entry->type = OUTPUT_LVDS; 5411 entry->type = OUTPUT_LVDS;
5456 if (entry->type == 4) { /* digital */ 5412 break;
5457 if (conn & 0x10) 5413 case 4:
5458 entry->type = OUTPUT_LVDS; 5414 switch ((conn & 0x000000f0) >> 4) {
5459 else 5415 case 0:
5460 entry->type = OUTPUT_TMDS; 5416 entry->type = OUTPUT_TMDS;
5417 break;
5418 case 1:
5419 entry->type = OUTPUT_LVDS;
5420 break;
5421 default:
5422 NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
5423 (conn & 0x000000f0) >> 4);
5424 return false;
5425 }
5426 break;
5427 default:
5428 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
5429 return false;
5461 } 5430 }
5462 /* what's in bits 5-13? could be some encoder maker thing, in tv case */ 5431
5463 entry->i2c_index = (conn >> 14) & 0xf; 5432 entry->i2c_index = (conn & 0x0003c000) >> 14;
5464 /* raw heads field is in range 0-1, so move to 1-2 */ 5433 entry->heads = ((conn & 0x001c0000) >> 18) + 1;
5465 entry->heads = ((conn >> 18) & 0x7) + 1; 5434 entry->or = entry->heads; /* same as heads, hopefully safe enough */
5466 entry->location = (conn >> 21) & 0xf; 5435 entry->location = (conn & 0x01e00000) >> 21;
5467 /* unused: entry->bus = (conn >> 25) & 0x7; */ 5436 entry->bus = (conn & 0x0e000000) >> 25;
5468 /* set or to be same as heads -- hopefully safe enough */
5469 entry->or = entry->heads;
5470 entry->duallink_possible = false; 5437 entry->duallink_possible = false;
5471 5438
5472 switch (entry->type) { 5439 switch (entry->type) {
5473 case OUTPUT_ANALOG: 5440 case OUTPUT_ANALOG:
5474 entry->crtconf.maxfreq = (conf & 0xffff) * 10; 5441 entry->crtconf.maxfreq = (conf & 0xffff) * 10;
5475 break; 5442 break;
5476 case OUTPUT_LVDS: 5443 case OUTPUT_TV:
5477 /* 5444 entry->tvconf.has_component_output = false;
5478 * This is probably buried in conn's unknown bits.
5479 * This will upset EDID-ful models, if they exist
5480 */
5481 entry->lvdsconf.use_straps_for_mode = true;
5482 entry->lvdsconf.use_power_scripts = true;
5483 break; 5445 break;
5484 case OUTPUT_TMDS: 5446 case OUTPUT_TMDS:
5485 /* 5447 /*
@@ -5488,8 +5450,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5488 */ 5450 */
5489 fabricate_vga_output(dcb, entry->i2c_index, entry->heads); 5451 fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
5490 break; 5452 break;
5491 case OUTPUT_TV: 5453 case OUTPUT_LVDS:
5492 entry->tvconf.has_component_output = false; 5454 if ((conn & 0x00003f00) != 0x10)
5455 entry->lvdsconf.use_straps_for_mode = true;
5456 entry->lvdsconf.use_power_scripts = true;
5457 break;
5458 default:
5493 break; 5459 break;
5494 } 5460 }
5495 5461
@@ -5564,11 +5530,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
5564 dcb->entries = newentries; 5530 dcb->entries = newentries;
5565} 5531}
5566 5532
5567static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) 5533static int
5534parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5568{ 5535{
5536 struct drm_nouveau_private *dev_priv = dev->dev_private;
5569 struct bios_parsed_dcb *bdcb = &bios->bdcb; 5537 struct bios_parsed_dcb *bdcb = &bios->bdcb;
5570 struct parsed_dcb *dcb; 5538 struct parsed_dcb *dcb;
5571 uint16_t dcbptr, i2ctabptr = 0; 5539 uint16_t dcbptr = 0, i2ctabptr = 0;
5572 uint8_t *dcbtable; 5540 uint8_t *dcbtable;
5573 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; 5541 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
5574 bool configblock = true; 5542 bool configblock = true;
@@ -5579,16 +5547,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
5579 dcb->entries = 0; 5547 dcb->entries = 0;
5580 5548
5581 /* get the offset from 0x36 */ 5549 /* get the offset from 0x36 */
5582 dcbptr = ROM16(bios->data[0x36]); 5550 if (dev_priv->card_type > NV_04) {
5551 dcbptr = ROM16(bios->data[0x36]);
5552 if (dcbptr == 0x0000)
5553 NV_WARN(dev, "No output data (DCB) found in BIOS\n");
5554 }
5583 5555
5556 /* this situation likely means a really old card, pre DCB */
5584 if (dcbptr == 0x0) { 5557 if (dcbptr == 0x0) {
5585 NV_WARN(dev, "No output data (DCB) found in BIOS, " 5558 NV_INFO(dev, "Assuming a CRT output exists\n");
5586 "assuming a CRT output exists\n");
5587 /* this situation likely means a really old card, pre DCB */
5588 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); 5559 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
5589 5560
5590 if (nv04_tv_identify(dev, 5561 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
5591 bios->legacy.i2c_indices.tv) >= 0)
5592 fabricate_tv_output(dcb, twoHeads); 5562 fabricate_tv_output(dcb, twoHeads);
5593 5563
5594 return 0; 5564 return 0;
@@ -5892,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5892 struct nvbios *bios = &dev_priv->VBIOS; 5862 struct nvbios *bios = &dev_priv->VBIOS;
5893 struct init_exec iexec = { true, false }; 5863 struct init_exec iexec = { true, false };
5894 5864
5865 mutex_lock(&bios->lock);
5895 bios->display.output = dcbent; 5866 bios->display.output = dcbent;
5896 parse_init_table(bios, table, &iexec); 5867 parse_init_table(bios, table, &iexec);
5897 bios->display.output = NULL; 5868 bios->display.output = NULL;
5869 mutex_unlock(&bios->lock);
5898} 5870}
5899 5871
5900static bool NVInitVBIOS(struct drm_device *dev) 5872static bool NVInitVBIOS(struct drm_device *dev)
@@ -5903,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5903 struct nvbios *bios = &dev_priv->VBIOS; 5875 struct nvbios *bios = &dev_priv->VBIOS;
5904 5876
5905 memset(bios, 0, sizeof(struct nvbios)); 5877 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock);
5906 bios->dev = dev; 5879 bios->dev = dev;
5907 5880
5908 if (!NVShadowVBIOS(dev, bios->data)) 5881 if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
205 struct drm_device *dev; 205 struct drm_device *dev;
206 struct nouveau_bios_info pub; 206 struct nouveau_bios_info pub;
207 207
208 struct mutex lock;
209
208 uint8_t data[NV_PROM_SIZE]; 210 uint8_t data[NV_PROM_SIZE];
209 unsigned int length; 211 unsigned int length;
210 bool execute; 212 bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0cad6d834eb2..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -33,10 +33,13 @@
33#include "nouveau_drv.h" 33#include "nouveau_drv.h"
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35 35
36#include <linux/log2.h>
37
36static void 38static void
37nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 39nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
38{ 40{
39 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 41 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
42 struct drm_device *dev = dev_priv->dev;
40 struct nouveau_bo *nvbo = nouveau_bo(bo); 43 struct nouveau_bo *nvbo = nouveau_bo(bo);
41 44
42 ttm_bo_kunmap(&nvbo->kmap); 45 ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
44 if (unlikely(nvbo->gem)) 47 if (unlikely(nvbo->gem))
45 DRM_ERROR("bo %p still attached to GEM object\n", bo); 48 DRM_ERROR("bo %p still attached to GEM object\n", bo);
46 49
50 if (nvbo->tile)
51 nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
52
47 spin_lock(&dev_priv->ttm.bo_list_lock); 53 spin_lock(&dev_priv->ttm.bo_list_lock);
48 list_del(&nvbo->head); 54 list_del(&nvbo->head);
49 spin_unlock(&dev_priv->ttm.bo_list_lock); 55 spin_unlock(&dev_priv->ttm.bo_list_lock);
50 kfree(nvbo); 56 kfree(nvbo);
51} 57}
52 58
59static void
60nouveau_bo_fixup_align(struct drm_device *dev,
61 uint32_t tile_mode, uint32_t tile_flags,
62 int *align, int *size)
63{
64 struct drm_nouveau_private *dev_priv = dev->dev_private;
65
66 /*
67 * Some of the tile_flags have a periodic structure of N*4096 bytes,
68 * align to to that as well as the page size. Align the size to the
69 * appropriate boundaries. This does imply that sizes are rounded up
70 * 3-7 pages, so be aware of this and do not waste memory by allocating
71 * many small buffers.
72 */
73 if (dev_priv->card_type == NV_50) {
74 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
75 int i;
76
77 switch (tile_flags) {
78 case 0x1800:
79 case 0x2800:
80 case 0x4800:
81 case 0x7a00:
82 if (is_power_of_2(block_size)) {
83 for (i = 1; i < 10; i++) {
84 *align = 12 * i * block_size;
85 if (!(*align % 65536))
86 break;
87 }
88 } else {
89 for (i = 1; i < 10; i++) {
90 *align = 8 * i * block_size;
91 if (!(*align % 65536))
92 break;
93 }
94 }
95 *size = roundup(*size, *align);
96 break;
97 default:
98 break;
99 }
100
101 } else {
102 if (tile_mode) {
103 if (dev_priv->chipset >= 0x40) {
104 *align = 65536;
105 *size = roundup(*size, 64 * tile_mode);
106
107 } else if (dev_priv->chipset >= 0x30) {
108 *align = 32768;
109 *size = roundup(*size, 64 * tile_mode);
110
111 } else if (dev_priv->chipset >= 0x20) {
112 *align = 16384;
113 *size = roundup(*size, 64 * tile_mode);
114
115 } else if (dev_priv->chipset >= 0x10) {
116 *align = 16384;
117 *size = roundup(*size, 32 * tile_mode);
118 }
119 }
120 }
121
122 /* ALIGN works only on powers of two. */
123 *size = roundup(*size, PAGE_SIZE);
124
125 if (dev_priv->card_type == NV_50) {
126 *size = roundup(*size, 65536);
127 *align = max(65536, *align);
128 }
129}
130
53int 131int
54nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, 132nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
55 int size, int align, uint32_t flags, uint32_t tile_mode, 133 int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
58{ 136{
59 struct drm_nouveau_private *dev_priv = dev->dev_private; 137 struct drm_nouveau_private *dev_priv = dev->dev_private;
60 struct nouveau_bo *nvbo; 138 struct nouveau_bo *nvbo;
61 int ret, n = 0; 139 int ret = 0;
62 140
63 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 141 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
64 if (!nvbo) 142 if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
70 nvbo->tile_mode = tile_mode; 148 nvbo->tile_mode = tile_mode;
71 nvbo->tile_flags = tile_flags; 149 nvbo->tile_flags = tile_flags;
72 150
73 /* 151 nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
74 * Some of the tile_flags have a periodic structure of N*4096 bytes,
75 * align to to that as well as the page size. Overallocate memory to
76 * avoid corruption of other buffer objects.
77 */
78 switch (tile_flags) {
79 case 0x1800:
80 case 0x2800:
81 case 0x4800:
82 case 0x7a00:
83 if (dev_priv->chipset >= 0xA0) {
84 /* This is based on high end cards with 448 bits
85 * memory bus, could be different elsewhere.*/
86 size += 6 * 28672;
87 /* 8 * 28672 is the actual alignment requirement,
88 * but we must also align to page size. */
89 align = 2 * 8 * 28672;
90 } else if (dev_priv->chipset >= 0x90) {
91 size += 3 * 16384;
92 align = 12 * 16834;
93 } else {
94 size += 3 * 8192;
95 /* 12 * 8192 is the actual alignment requirement,
96 * but we must also align to page size. */
97 align = 2 * 12 * 8192;
98 }
99 break;
100 default:
101 break;
102 }
103
104 align >>= PAGE_SHIFT; 152 align >>= PAGE_SHIFT;
105 153
106 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
107 if (dev_priv->card_type == NV_50) {
108 size = (size + 65535) & ~65535;
109 if (align < (65536 / PAGE_SIZE))
110 align = (65536 / PAGE_SIZE);
111 }
112
113 if (flags & TTM_PL_FLAG_VRAM)
114 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
115 if (flags & TTM_PL_FLAG_TT)
116 nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
117 nvbo->placement.fpfn = 0; 154 nvbo->placement.fpfn = 0;
118 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; 155 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
119 nvbo->placement.placement = nvbo->placements; 156 nouveau_bo_placement_set(nvbo, flags);
120 nvbo->placement.busy_placement = nvbo->placements;
121 nvbo->placement.num_placement = n;
122 nvbo->placement.num_busy_placement = n;
123 157
124 nvbo->channel = chan; 158 nvbo->channel = chan;
125 nouveau_bo_placement_set(nvbo, flags);
126 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 159 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
127 ttm_bo_type_device, &nvbo->placement, align, 0, 160 ttm_bo_type_device, &nvbo->placement, align, 0,
128 false, NULL, size, nouveau_bo_del_ttm); 161 false, NULL, size, nouveau_bo_del_ttm);
@@ -421,6 +454,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
421/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access 454/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
422 * TTM_PL_{VRAM,TT} directly. 455 * TTM_PL_{VRAM,TT} directly.
423 */ 456 */
457
424static int 458static int
425nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, 459nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
426 struct nouveau_bo *nvbo, bool evict, bool no_wait, 460 struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -435,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
435 469
436 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, 470 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
437 evict, no_wait, new_mem); 471 evict, no_wait, new_mem);
472 if (nvbo->channel && nvbo->channel != chan)
473 ret = nouveau_fence_wait(fence, NULL, false, false);
438 nouveau_fence_unref((void *)&fence); 474 nouveau_fence_unref((void *)&fence);
439 return ret; 475 return ret;
440} 476}
@@ -455,11 +491,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
455} 491}
456 492
457static int 493static int
458nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait, 494nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
459 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 495 int no_wait, struct ttm_mem_reg *new_mem)
460{ 496{
461 struct nouveau_bo *nvbo = nouveau_bo(bo); 497 struct nouveau_bo *nvbo = nouveau_bo(bo);
462 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 498 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
499 struct ttm_mem_reg *old_mem = &bo->mem;
463 struct nouveau_channel *chan; 500 struct nouveau_channel *chan;
464 uint64_t src_offset, dst_offset; 501 uint64_t src_offset, dst_offset;
465 uint32_t page_count; 502 uint32_t page_count;
@@ -547,7 +584,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
547 584
548 placement.fpfn = placement.lpfn = 0; 585 placement.fpfn = placement.lpfn = 0;
549 placement.num_placement = placement.num_busy_placement = 1; 586 placement.num_placement = placement.num_busy_placement = 1;
550 placement.placement = &placement_memtype; 587 placement.placement = placement.busy_placement = &placement_memtype;
551 588
552 tmp_mem = *new_mem; 589 tmp_mem = *new_mem;
553 tmp_mem.mm_node = NULL; 590 tmp_mem.mm_node = NULL;
@@ -559,7 +596,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
559 if (ret) 596 if (ret)
560 goto out; 597 goto out;
561 598
562 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem); 599 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
563 if (ret) 600 if (ret)
564 goto out; 601 goto out;
565 602
@@ -585,7 +622,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
585 622
586 placement.fpfn = placement.lpfn = 0; 623 placement.fpfn = placement.lpfn = 0;
587 placement.num_placement = placement.num_busy_placement = 1; 624 placement.num_placement = placement.num_busy_placement = 1;
588 placement.placement = &placement_memtype; 625 placement.placement = placement.busy_placement = &placement_memtype;
589 626
590 tmp_mem = *new_mem; 627 tmp_mem = *new_mem;
591 tmp_mem.mm_node = NULL; 628 tmp_mem.mm_node = NULL;
@@ -597,7 +634,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
597 if (ret) 634 if (ret)
598 goto out; 635 goto out;
599 636
600 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem); 637 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
601 if (ret) 638 if (ret)
602 goto out; 639 goto out;
603 640
@@ -612,52 +649,106 @@ out:
612} 649}
613 650
614static int 651static int
615nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 652nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
616 bool no_wait, struct ttm_mem_reg *new_mem) 653 struct nouveau_tile_reg **new_tile)
617{ 654{
618 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 655 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
619 struct nouveau_bo *nvbo = nouveau_bo(bo);
620 struct drm_device *dev = dev_priv->dev; 656 struct drm_device *dev = dev_priv->dev;
621 struct ttm_mem_reg *old_mem = &bo->mem; 657 struct nouveau_bo *nvbo = nouveau_bo(bo);
658 uint64_t offset;
622 int ret; 659 int ret;
623 660
624 if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM && 661 if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
625 !nvbo->no_vm) { 662 /* Nothing to do. */
626 uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT; 663 *new_tile = NULL;
664 return 0;
665 }
666
667 offset = new_mem->mm_node->start << PAGE_SHIFT;
627 668
669 if (dev_priv->card_type == NV_50) {
628 ret = nv50_mem_vm_bind_linear(dev, 670 ret = nv50_mem_vm_bind_linear(dev,
629 offset + dev_priv->vm_vram_base, 671 offset + dev_priv->vm_vram_base,
630 new_mem->size, nvbo->tile_flags, 672 new_mem->size, nvbo->tile_flags,
631 offset); 673 offset);
632 if (ret) 674 if (ret)
633 return ret; 675 return ret;
676
677 } else if (dev_priv->card_type >= NV_10) {
678 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
679 nvbo->tile_mode);
680 }
681
682 return 0;
683}
684
685static void
686nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
687 struct nouveau_tile_reg *new_tile,
688 struct nouveau_tile_reg **old_tile)
689{
690 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
691 struct drm_device *dev = dev_priv->dev;
692
693 if (dev_priv->card_type >= NV_10 &&
694 dev_priv->card_type < NV_50) {
695 if (*old_tile)
696 nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
697
698 *old_tile = new_tile;
634 } 699 }
700}
701
702static int
703nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
704 bool no_wait, struct ttm_mem_reg *new_mem)
705{
706 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
707 struct nouveau_bo *nvbo = nouveau_bo(bo);
708 struct ttm_mem_reg *old_mem = &bo->mem;
709 struct nouveau_tile_reg *new_tile = NULL;
710 int ret = 0;
711
712 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
713 if (ret)
714 return ret;
635 715
716 /* Software copy if the card isn't up and running yet. */
636 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || 717 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
637 !dev_priv->channel) 718 !dev_priv->channel) {
638 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 719 ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
720 goto out;
721 }
639 722
723 /* Fake bo copy. */
640 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { 724 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
641 BUG_ON(bo->mem.mm_node != NULL); 725 BUG_ON(bo->mem.mm_node != NULL);
642 bo->mem = *new_mem; 726 bo->mem = *new_mem;
643 new_mem->mm_node = NULL; 727 new_mem->mm_node = NULL;
644 return 0; 728 goto out;
645 } 729 }
646 730
647 if (new_mem->mem_type == TTM_PL_SYSTEM) { 731 /* Hardware assisted copy. */
648 if (old_mem->mem_type == TTM_PL_SYSTEM) 732 if (new_mem->mem_type == TTM_PL_SYSTEM)
649 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 733 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
650 if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem)) 734 else if (old_mem->mem_type == TTM_PL_SYSTEM)
651 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 735 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
652 } else if (old_mem->mem_type == TTM_PL_SYSTEM) { 736 else
653 if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem)) 737 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
654 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
655 } else {
656 if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
657 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
658 }
659 738
660 return 0; 739 if (!ret)
740 goto out;
741
742 /* Fallback to software copy. */
743 ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
744
745out:
746 if (ret)
747 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
748 else
749 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
750
751 return ret;
661} 752}
662 753
663static int 754static int
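nouveau_bo_move() is now structured as: bind the new tiling region, pick a hardware copy path (flipd/flips/m2mf), fall back to a software memcpy if that fails, then either commit the new tile or drop it on error. A minimal control-flow sketch with stand-in types and a deliberately failing hardware path is below; it models only the ordering, not the driver code.

#include <stdio.h>

enum mem_type { MEM_SYSTEM, MEM_VRAM };

static int hw_copy(enum mem_type from, enum mem_type to)
{
	/* Pretend the accelerated path fails for VRAM->SYSTEM to show the fallback. */
	return (from == MEM_VRAM && to == MEM_SYSTEM) ? -1 : 0;
}

static int bo_move(enum mem_type old_type, enum mem_type new_type)
{
	int used_fallback = 0;
	int ret = hw_copy(old_type, new_type);  /* flipd/flips/m2mf stand-in */

	if (ret) {
		ret = 0;                        /* software memcpy fallback */
		used_fallback = 1;
	}

	if (ret)
		printf("move failed: drop the new tile region\n");
	else
		printf("move done via %s: commit new tile, expire old one\n",
		       used_fallback ? "memcpy" : "hw copy");
	return ret;
}

int main(void)
{
	bo_move(MEM_SYSTEM, MEM_VRAM);
	bo_move(MEM_VRAM, MEM_SYSTEM);
	return 0;
}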
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9aaa972f8822..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
158 return ret; 158 return ret;
159 } 159 }
160 160
161 nouveau_dma_pre_init(chan);
162
161 /* Locate channel's user control regs */ 163 /* Locate channel's user control regs */
162 if (dev_priv->card_type < NV_40) 164 if (dev_priv->card_type < NV_40)
163 user = NV03_USER(channel); 165 user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
235 return 0; 237 return 0;
236} 238}
237 239
238int
239nouveau_channel_idle(struct nouveau_channel *chan)
240{
241 struct drm_device *dev = chan->dev;
242 struct drm_nouveau_private *dev_priv = dev->dev_private;
243 struct nouveau_engine *engine = &dev_priv->engine;
244 uint32_t caches;
245 int idle;
246
247 if (!chan) {
248 NV_ERROR(dev, "no channel...\n");
249 return 1;
250 }
251
252 caches = nv_rd32(dev, NV03_PFIFO_CACHES);
253 nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
254
255 if (engine->fifo.channel_id(dev) != chan->id) {
256 struct nouveau_gpuobj *ramfc =
257 chan->ramfc ? chan->ramfc->gpuobj : NULL;
258
259 if (!ramfc) {
260 NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
261 return 1;
262 }
263
264 engine->instmem.prepare_access(dev, false);
265 if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
266 idle = 0;
267 else
268 idle = 1;
269 engine->instmem.finish_access(dev);
270 } else {
271 idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
272 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
273 }
274
275 nv_wr32(dev, NV03_PFIFO_CACHES, caches);
276 return idle;
277}
278
279/* stops a fifo */ 240/* stops a fifo */
280void 241void
281nouveau_channel_free(struct nouveau_channel *chan) 242nouveau_channel_free(struct nouveau_channel *chan)
@@ -317,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
317 /* Ensure the channel is no longer active on the GPU */ 278 /* Ensure the channel is no longer active on the GPU */
318 pfifo->reassign(dev, false); 279 pfifo->reassign(dev, false);
319 280
320 if (pgraph->channel(dev) == chan) { 281 pgraph->fifo_access(dev, false);
321 pgraph->fifo_access(dev, false); 282 if (pgraph->channel(dev) == chan)
322 pgraph->unload_context(dev); 283 pgraph->unload_context(dev);
323 pgraph->fifo_access(dev, true);
324 }
325 pgraph->destroy_context(chan); 284 pgraph->destroy_context(chan);
285 pgraph->fifo_access(dev, true);
326 286
327 if (pfifo->channel_id(dev) == chan->id) { 287 if (pfifo->channel_id(dev) == chan->id) {
328 pfifo->disable(dev); 288 pfifo->disable(dev);
@@ -414,7 +374,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
414 init->subchan[0].grclass = 0x0039; 374 init->subchan[0].grclass = 0x0039;
415 else 375 else
416 init->subchan[0].grclass = 0x5039; 376 init->subchan[0].grclass = 0x5039;
417 init->nr_subchan = 1; 377 init->subchan[1].handle = NvSw;
378 init->subchan[1].grclass = NV_SW;
379 init->nr_subchan = 2;
418 380
419 /* Named memory object area */ 381 /* Named memory object area */
420 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, 382 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5a10deb8bdbd..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
24 * 24 *
25 */ 25 */
26 26
27#include <acpi/button.h>
28
27#include "drmP.h" 29#include "drmP.h"
28#include "drm_edid.h" 30#include "drm_edid.h"
29#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32
30#include "nouveau_reg.h" 33#include "nouveau_reg.h"
31#include "nouveau_drv.h" 34#include "nouveau_drv.h"
32#include "nouveau_encoder.h" 35#include "nouveau_encoder.h"
@@ -83,14 +86,17 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
83static void 86static void
84nouveau_connector_destroy(struct drm_connector *drm_connector) 87nouveau_connector_destroy(struct drm_connector *drm_connector)
85{ 88{
86 struct nouveau_connector *connector = nouveau_connector(drm_connector); 89 struct nouveau_connector *nv_connector =
87 struct drm_device *dev = connector->base.dev; 90 nouveau_connector(drm_connector);
91 struct drm_device *dev;
88 92
89 NV_DEBUG_KMS(dev, "\n"); 93 if (!nv_connector)
90
91 if (!connector)
92 return; 94 return;
93 95
96 dev = nv_connector->base.dev;
97 NV_DEBUG_KMS(dev, "\n");
98
99 kfree(nv_connector->edid);
94 drm_sysfs_connector_remove(drm_connector); 100 drm_sysfs_connector_remove(drm_connector);
95 drm_connector_cleanup(drm_connector); 101 drm_connector_cleanup(drm_connector);
96 kfree(drm_connector); 102 kfree(drm_connector);
@@ -233,10 +239,21 @@ nouveau_connector_detect(struct drm_connector *connector)
233 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 239 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
234 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); 240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
235 if (nv_encoder && nv_connector->native_mode) { 241 if (nv_encoder && nv_connector->native_mode) {
242#ifdef CONFIG_ACPI
243 if (!nouveau_ignorelid && !acpi_lid_open())
244 return connector_status_disconnected;
245#endif
236 nouveau_connector_set_encoder(connector, nv_encoder); 246 nouveau_connector_set_encoder(connector, nv_encoder);
237 return connector_status_connected; 247 return connector_status_connected;
238 } 248 }
239 249
250 /* Cleanup the previous EDID block. */
251 if (nv_connector->edid) {
252 drm_mode_connector_update_edid_property(connector, NULL);
253 kfree(nv_connector->edid);
254 nv_connector->edid = NULL;
255 }
256
240 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); 257 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
241 if (i2c) { 258 if (i2c) {
242 nouveau_connector_ddc_prepare(connector, &flags); 259 nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +264,7 @@ nouveau_connector_detect(struct drm_connector *connector)
247 if (!nv_connector->edid) { 264 if (!nv_connector->edid) {
248 NV_ERROR(dev, "DDC responded, but no EDID for %s\n", 265 NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
249 drm_get_connector_name(connector)); 266 drm_get_connector_name(connector));
250 return connector_status_disconnected; 267 goto detect_analog;
251 } 268 }
252 269
253 if (nv_encoder->dcb->type == OUTPUT_DP && 270 if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +298,7 @@ nouveau_connector_detect(struct drm_connector *connector)
281 return connector_status_connected; 298 return connector_status_connected;
282 } 299 }
283 300
301detect_analog:
284 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 302 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
285 if (!nv_encoder) 303 if (!nv_encoder)
286 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 304 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -687,8 +705,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
687 */ 705 */
688 if (!nv_connector->edid && !nv_connector->native_mode && 706 if (!nv_connector->edid && !nv_connector->native_mode &&
689 !dev_priv->VBIOS.pub.fp_no_ddc) { 707 !dev_priv->VBIOS.pub.fp_no_ddc) {
690 nv_connector->edid = 708 struct edid *edid =
691 (struct edid *)nouveau_bios_embedded_edid(dev); 709 (struct edid *)nouveau_bios_embedded_edid(dev);
710 if (edid) {
711 nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
712 *(nv_connector->edid) = *edid;
713 }
692 } 714 }
693 715
694 if (!nv_connector->edid) 716 if (!nv_connector->edid)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 703553687b20..50d9e67745af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -29,12 +29,22 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_dma.h" 30#include "nouveau_dma.h"
31 31
32void
33nouveau_dma_pre_init(struct nouveau_channel *chan)
34{
35 chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
36 chan->dma.put = 0;
37 chan->dma.cur = chan->dma.put;
38 chan->dma.free = chan->dma.max - chan->dma.cur;
39}
40
32int 41int
33nouveau_dma_init(struct nouveau_channel *chan) 42nouveau_dma_init(struct nouveau_channel *chan)
34{ 43{
35 struct drm_device *dev = chan->dev; 44 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 45 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpuobj *m2mf = NULL; 46 struct nouveau_gpuobj *m2mf = NULL;
47 struct nouveau_gpuobj *nvsw = NULL;
38 int ret, i; 48 int ret, i;
39 49
40 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ 50 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
47 if (ret) 57 if (ret)
48 return ret; 58 return ret;
49 59
60 /* Create an NV_SW object for various sync purposes */
61 ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
62 if (ret)
63 return ret;
64
65 ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
66 if (ret)
67 return ret;
68
50 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 69 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
51 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); 70 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
52 if (ret) 71 if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
64 return ret; 83 return ret;
65 } 84 }
66 85
67 /* Initialise DMA vars */
68 chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
69 chan->dma.put = 0;
70 chan->dma.cur = chan->dma.put;
71 chan->dma.free = chan->dma.max - chan->dma.cur;
72
73 /* Insert NOPS for NOUVEAU_DMA_SKIPS */ 86 /* Insert NOPS for NOUVEAU_DMA_SKIPS */
74 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); 87 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
75 if (ret) 88 if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
87 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); 100 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
88 OUT_RING(chan, NvNotify0); 101 OUT_RING(chan, NvNotify0);
89 102
103 /* Initialise NV_SW */
104 ret = RING_SPACE(chan, 2);
105 if (ret)
106 return ret;
107 BEGIN_RING(chan, NvSubSw, 0, 1);
108 OUT_RING(chan, NvSw);
109
90 /* Sit back and pray the channel works.. */ 110 /* Sit back and pray the channel works.. */
91 FIRE_RING(chan); 111 FIRE_RING(chan);
92 112
@@ -106,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
106 chan->dma.cur += nr_dwords; 126 chan->dma.cur += nr_dwords;
107} 127}
108 128
109static inline bool 129/* Fetch and adjust GPU GET pointer
110READ_GET(struct nouveau_channel *chan, uint32_t *get) 130 *
131 * Returns:
132 * value >= 0, the adjusted GET pointer
133 * -EINVAL if GET pointer currently outside main push buffer
134 * -EBUSY if timeout exceeded
135 */
136static inline int
137READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
111{ 138{
112 uint32_t val; 139 uint32_t val;
113 140
114 val = nvchan_rd32(chan, chan->user_get); 141 val = nvchan_rd32(chan, chan->user_get);
115 if (val < chan->pushbuf_base || 142
116 val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) { 143 /* reset counter as long as GET is still advancing, this is
117 /* meaningless to dma_wait() except to know whether the 144 * to avoid misdetecting a GPU lockup if the GPU happens to
118 * GPU has stalled or not 145 * just be processing an operation that takes a long time
119 */ 146 */
120 *get = val; 147 if (val != *prev_get) {
121 return false; 148 *prev_get = val;
149 *timeout = 0;
150 }
151
152 if ((++*timeout & 0xff) == 0) {
153 DRM_UDELAY(1);
154 if (*timeout > 100000)
155 return -EBUSY;
122 } 156 }
123 157
124 *get = (val - chan->pushbuf_base) >> 2; 158 if (val < chan->pushbuf_base ||
125 return true; 159 val > chan->pushbuf_base + (chan->dma.max << 2))
160 return -EINVAL;
161
162 return (val - chan->pushbuf_base) >> 2;
126} 163}
127 164
128int 165int
129nouveau_dma_wait(struct nouveau_channel *chan, int size) 166nouveau_dma_wait(struct nouveau_channel *chan, int size)
130{ 167{
131 uint32_t get, prev_get = 0, cnt = 0; 168 uint32_t prev_get = 0, cnt = 0;
132 bool get_valid; 169 int get;
133 170
134 while (chan->dma.free < size) { 171 while (chan->dma.free < size) {
135 /* reset counter as long as GET is still advancing, this is 172 get = READ_GET(chan, &prev_get, &cnt);
136 * to avoid misdetecting a GPU lockup if the GPU happens to 173 if (unlikely(get == -EBUSY))
137 * just be processing an operation that takes a long time 174 return -EBUSY;
138 */
139 get_valid = READ_GET(chan, &get);
140 if (get != prev_get) {
141 prev_get = get;
142 cnt = 0;
143 }
144
145 if ((++cnt & 0xff) == 0) {
146 DRM_UDELAY(1);
147 if (cnt > 100000)
148 return -EBUSY;
149 }
150 175
151 /* loop until we have a usable GET pointer. the value 176 /* loop until we have a usable GET pointer. the value
152 * we read from the GPU may be outside the main ring if 177 * we read from the GPU may be outside the main ring if
@@ -157,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
157 * from the SKIPS area, so the code below doesn't have to deal 182 * from the SKIPS area, so the code below doesn't have to deal
158 * with some fun corner cases. 183 * with some fun corner cases.
159 */ 184 */
160 if (!get_valid || get < NOUVEAU_DMA_SKIPS) 185 if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
161 continue; 186 continue;
162 187
163 if (get <= chan->dma.cur) { 188 if (get <= chan->dma.cur) {
@@ -183,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
183 * after processing the currently pending commands. 208 * after processing the currently pending commands.
184 */ 209 */
185 OUT_RING(chan, chan->pushbuf_base | 0x20000000); 210 OUT_RING(chan, chan->pushbuf_base | 0x20000000);
211
 212 /* wait for GET to depart from the skips area.
 213 * prevents writing GET==PUT, a race that would
 214 * otherwise make us think the GPU is idle
 215 * when it's not.
 216 */
217 do {
218 get = READ_GET(chan, &prev_get, &cnt);
219 if (unlikely(get == -EBUSY))
220 return -EBUSY;
221 if (unlikely(get == -EINVAL))
222 continue;
223 } while (get <= NOUVEAU_DMA_SKIPS);
186 WRITE_PUT(NOUVEAU_DMA_SKIPS); 224 WRITE_PUT(NOUVEAU_DMA_SKIPS);
187 225
188 /* we're now submitting commands at the start of 226 /* we're now submitting commands at the start of
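The reworked READ_GET() above folds the lockup-timeout bookkeeping into the helper and reports its state through the return value: an adjusted pointer when GET is usable, -EINVAL when it is outside the push buffer, -EBUSY when the timeout expires. A stand-alone sketch of that contract, with a fake register read in place of the driver's MMIO access, is below; fake_hw_get() and the sizes are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Stand-in for the hardware GET register; a real driver reads it via MMIO. */
static uint32_t fake_hw_get(void)
{
	static uint32_t val = 0x100;
	return val += 4;                 /* pretend the GPU keeps advancing */
}

/* Same convention as the reworked READ_GET(): >= 0 is the adjusted pointer,
 * -EINVAL means GET is outside the push buffer, -EBUSY means timeout. */
static int read_get(uint32_t pushbuf_base, uint32_t pushbuf_size,
		    uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val = fake_hw_get();

	if (val != *prev_get) {          /* still advancing: reset the counter */
		*prev_get = val;
		*timeout = 0;
	}
	if (++*timeout > 100000)
		return -EBUSY;
	if (val < pushbuf_base || val > pushbuf_base + pushbuf_size)
		return -EINVAL;
	return (val - pushbuf_base) >> 2;
}

int main(void)
{
	uint32_t prev = 0, cnt = 0;
	int get = read_get(0x100, 0x1000, &prev, &cnt);

	printf("get = %d\n", get);       /* 1: one dword past the base */
	return 0;
}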
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 04e85d8f757e..dabfd655f93e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -46,10 +46,11 @@
46/* Hardcoded object assignments to subchannels (subchannel id). */ 46/* Hardcoded object assignments to subchannels (subchannel id). */
47enum { 47enum {
48 NvSubM2MF = 0, 48 NvSubM2MF = 0,
49 NvSub2D = 1, 49 NvSubSw = 1,
50 NvSubCtxSurf2D = 1, 50 NvSub2D = 2,
51 NvSubGdiRect = 2, 51 NvSubCtxSurf2D = 2,
52 NvSubImageBlit = 3 52 NvSubGdiRect = 3,
53 NvSubImageBlit = 4
53}; 54};
54 55
55/* Object handles. */ 56/* Object handles. */
@@ -67,6 +68,7 @@ enum {
67 NvClipRect = 0x8000000b, 68 NvClipRect = 0x8000000b,
68 NvGdiRect = 0x8000000c, 69 NvGdiRect = 0x8000000c,
69 NvImageBlit = 0x8000000d, 70 NvImageBlit = 0x8000000d,
71 NvSw = 0x8000000e,
70 72
71 /* G80+ display objects */ 73 /* G80+ display objects */
72 NvEvoVRAM = 0x01000000, 74 NvEvoVRAM = 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 9e2926c48579..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
490 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { 490 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
491 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", 491 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
492 nv_rd32(dev, NV50_AUXCH_CTRL(index))); 492 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
493 return -EBUSY; 493 ret = -EBUSY;
494 goto out;
494 } 495 }
495 496
496 udelay(400); 497 udelay(400);
@@ -502,6 +503,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 } 503 }
503 504
504 if (cmd & 1) { 505 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
505 for (i = 0; i < 4; i++) { 511 for (i = 0; i < 4; i++) {
506 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
507 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); 513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 06eb993e0883..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57 57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); 58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify; 59int nouveau_vram_notify = 1;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400); 60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61 61
62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); 62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -71,6 +71,18 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
71int nouveau_uscript_tmds = -1; 71int nouveau_uscript_tmds = -1;
72module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); 72module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
73 73
74MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77
 78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81
 82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85
74MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
75 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
76 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5f8cbb79c499..5445cefdd03e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -59,11 +59,19 @@ struct nouveau_grctx;
59#define MAX_NUM_DCB_ENTRIES 16 59#define MAX_NUM_DCB_ENTRIES 16
60 60
61#define NOUVEAU_MAX_CHANNEL_NR 128 61#define NOUVEAU_MAX_CHANNEL_NR 128
62#define NOUVEAU_MAX_TILE_NR 15
62 63
63#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) 64#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
64#define NV50_VM_BLOCK (512*1024*1024ULL) 65#define NV50_VM_BLOCK (512*1024*1024ULL)
65#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) 66#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
66 67
68struct nouveau_tile_reg {
69 struct nouveau_fence *fence;
70 uint32_t addr;
71 uint32_t size;
72 bool used;
73};
74
67struct nouveau_bo { 75struct nouveau_bo {
68 struct ttm_buffer_object bo; 76 struct ttm_buffer_object bo;
69 struct ttm_placement placement; 77 struct ttm_placement placement;
@@ -83,6 +91,7 @@ struct nouveau_bo {
83 91
84 uint32_t tile_mode; 92 uint32_t tile_mode;
85 uint32_t tile_flags; 93 uint32_t tile_flags;
94 struct nouveau_tile_reg *tile;
86 95
87 struct drm_gem_object *gem; 96 struct drm_gem_object *gem;
88 struct drm_file *cpu_filp; 97 struct drm_file *cpu_filp;
@@ -277,8 +286,13 @@ struct nouveau_timer_engine {
277}; 286};
278 287
279struct nouveau_fb_engine { 288struct nouveau_fb_engine {
289 int num_tiles;
290
280 int (*init)(struct drm_device *dev); 291 int (*init)(struct drm_device *dev);
281 void (*takedown)(struct drm_device *dev); 292 void (*takedown)(struct drm_device *dev);
293
294 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
295 uint32_t size, uint32_t pitch);
282}; 296};
283 297
284struct nouveau_fifo_engine { 298struct nouveau_fifo_engine {
@@ -292,6 +306,8 @@ struct nouveau_fifo_engine {
292 void (*disable)(struct drm_device *); 306 void (*disable)(struct drm_device *);
293 void (*enable)(struct drm_device *); 307 void (*enable)(struct drm_device *);
294 bool (*reassign)(struct drm_device *, bool enable); 308 bool (*reassign)(struct drm_device *, bool enable);
309 bool (*cache_flush)(struct drm_device *dev);
310 bool (*cache_pull)(struct drm_device *dev, bool enable);
295 311
296 int (*channel_id)(struct drm_device *); 312 int (*channel_id)(struct drm_device *);
297 313
@@ -330,6 +346,9 @@ struct nouveau_pgraph_engine {
330 void (*destroy_context)(struct nouveau_channel *); 346 void (*destroy_context)(struct nouveau_channel *);
331 int (*load_context)(struct nouveau_channel *); 347 int (*load_context)(struct nouveau_channel *);
332 int (*unload_context)(struct drm_device *); 348 int (*unload_context)(struct drm_device *);
349
350 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
351 uint32_t size, uint32_t pitch);
333}; 352};
334 353
335struct nouveau_engine { 354struct nouveau_engine {
@@ -490,6 +509,8 @@ struct drm_nouveau_private {
490 void __iomem *ramin; 509 void __iomem *ramin;
491 uint32_t ramin_size; 510 uint32_t ramin_size;
492 511
512 struct nouveau_bo *vga_ram;
513
493 struct workqueue_struct *wq; 514 struct workqueue_struct *wq;
494 struct work_struct irq_work; 515 struct work_struct irq_work;
495 516
@@ -548,6 +569,12 @@ struct drm_nouveau_private {
548 unsigned long sg_handle; 569 unsigned long sg_handle;
549 } gart_info; 570 } gart_info;
550 571
572 /* nv10-nv40 tiling regions */
573 struct {
574 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
575 spinlock_t lock;
576 } tile;
577
551 /* G8x/G9x virtual address space */ 578 /* G8x/G9x virtual address space */
552 uint64_t vm_gart_base; 579 uint64_t vm_gart_base;
553 uint64_t vm_gart_size; 580 uint64_t vm_gart_size;
@@ -650,6 +677,9 @@ extern char *nouveau_tv_norm;
650extern int nouveau_reg_debug; 677extern int nouveau_reg_debug;
651extern char *nouveau_vbios; 678extern char *nouveau_vbios;
652extern int nouveau_ctxfw; 679extern int nouveau_ctxfw;
680extern int nouveau_ignorelid;
681extern int nouveau_nofbaccel;
682extern int nouveau_noaccel;
653 683
654/* nouveau_state.c */ 684/* nouveau_state.c */
655extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 685extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@ -685,6 +715,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
685extern int nouveau_mem_init(struct drm_device *); 715extern int nouveau_mem_init(struct drm_device *);
686extern int nouveau_mem_init_agp(struct drm_device *); 716extern int nouveau_mem_init_agp(struct drm_device *);
687extern void nouveau_mem_close(struct drm_device *); 717extern void nouveau_mem_close(struct drm_device *);
718extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
719 uint32_t addr,
720 uint32_t size,
721 uint32_t pitch);
722extern void nv10_mem_expire_tiling(struct drm_device *dev,
723 struct nouveau_tile_reg *tile,
724 struct nouveau_fence *fence);
688extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, 725extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
689 uint32_t size, uint32_t flags, 726 uint32_t size, uint32_t flags,
690 uint64_t phys); 727 uint64_t phys);
@@ -713,7 +750,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
713 struct drm_file *file_priv, 750 struct drm_file *file_priv,
714 uint32_t fb_ctxdma, uint32_t tt_ctxdma); 751 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
715extern void nouveau_channel_free(struct nouveau_channel *); 752extern void nouveau_channel_free(struct nouveau_channel *);
716extern int nouveau_channel_idle(struct nouveau_channel *chan);
717 753
718/* nouveau_object.c */ 754/* nouveau_object.c */
719extern int nouveau_gpuobj_early_init(struct drm_device *); 755extern int nouveau_gpuobj_early_init(struct drm_device *);
@@ -756,6 +792,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
756 uint32_t *o_ret); 792 uint32_t *o_ret);
757extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, 793extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
758 struct nouveau_gpuobj **); 794 struct nouveau_gpuobj **);
795extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
796 struct nouveau_gpuobj **);
759extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, 797extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
760 struct drm_file *); 798 struct drm_file *);
761extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, 799extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -804,6 +842,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
804#endif 842#endif
805 843
806/* nouveau_dma.c */ 844/* nouveau_dma.c */
845extern void nouveau_dma_pre_init(struct nouveau_channel *);
807extern int nouveau_dma_init(struct nouveau_channel *); 846extern int nouveau_dma_init(struct nouveau_channel *);
808extern int nouveau_dma_wait(struct nouveau_channel *, int size); 847extern int nouveau_dma_wait(struct nouveau_channel *, int size);
809 848
@@ -879,16 +918,22 @@ extern void nv04_fb_takedown(struct drm_device *);
879/* nv10_fb.c */ 918/* nv10_fb.c */
880extern int nv10_fb_init(struct drm_device *); 919extern int nv10_fb_init(struct drm_device *);
881extern void nv10_fb_takedown(struct drm_device *); 920extern void nv10_fb_takedown(struct drm_device *);
921extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
922 uint32_t, uint32_t);
882 923
883/* nv40_fb.c */ 924/* nv40_fb.c */
884extern int nv40_fb_init(struct drm_device *); 925extern int nv40_fb_init(struct drm_device *);
885extern void nv40_fb_takedown(struct drm_device *); 926extern void nv40_fb_takedown(struct drm_device *);
927extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
928 uint32_t, uint32_t);
886 929
887/* nv04_fifo.c */ 930/* nv04_fifo.c */
888extern int nv04_fifo_init(struct drm_device *); 931extern int nv04_fifo_init(struct drm_device *);
889extern void nv04_fifo_disable(struct drm_device *); 932extern void nv04_fifo_disable(struct drm_device *);
890extern void nv04_fifo_enable(struct drm_device *); 933extern void nv04_fifo_enable(struct drm_device *);
891extern bool nv04_fifo_reassign(struct drm_device *, bool); 934extern bool nv04_fifo_reassign(struct drm_device *, bool);
935extern bool nv04_fifo_cache_flush(struct drm_device *);
936extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
892extern int nv04_fifo_channel_id(struct drm_device *); 937extern int nv04_fifo_channel_id(struct drm_device *);
893extern int nv04_fifo_create_context(struct nouveau_channel *); 938extern int nv04_fifo_create_context(struct nouveau_channel *);
894extern void nv04_fifo_destroy_context(struct nouveau_channel *); 939extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -941,6 +986,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
941extern int nv10_graph_load_context(struct nouveau_channel *); 986extern int nv10_graph_load_context(struct nouveau_channel *);
942extern int nv10_graph_unload_context(struct drm_device *); 987extern int nv10_graph_unload_context(struct drm_device *);
943extern void nv10_graph_context_switch(struct drm_device *); 988extern void nv10_graph_context_switch(struct drm_device *);
989extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
990 uint32_t, uint32_t);
944 991
945/* nv20_graph.c */ 992/* nv20_graph.c */
946extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; 993extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -952,6 +999,8 @@ extern int nv20_graph_unload_context(struct drm_device *);
952extern int nv20_graph_init(struct drm_device *); 999extern int nv20_graph_init(struct drm_device *);
953extern void nv20_graph_takedown(struct drm_device *); 1000extern void nv20_graph_takedown(struct drm_device *);
954extern int nv30_graph_init(struct drm_device *); 1001extern int nv30_graph_init(struct drm_device *);
1002extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
1003 uint32_t, uint32_t);
955 1004
956/* nv40_graph.c */ 1005/* nv40_graph.c */
957extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; 1006extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -963,6 +1012,8 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
963extern int nv40_graph_load_context(struct nouveau_channel *); 1012extern int nv40_graph_load_context(struct nouveau_channel *);
964extern int nv40_graph_unload_context(struct drm_device *); 1013extern int nv40_graph_unload_context(struct drm_device *);
965extern void nv40_grctx_init(struct nouveau_grctx *); 1014extern void nv40_grctx_init(struct nouveau_grctx *);
1015extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
1016 uint32_t, uint32_t);
966 1017
967/* nv50_graph.c */ 1018/* nv50_graph.c */
968extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; 1019extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -1030,8 +1081,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1030 1081
1031/* nv04_dac.c */ 1082/* nv04_dac.c */
1032extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); 1083extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
1033extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, 1084extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
1034 struct drm_connector *connector);
1035extern int nv04_dac_output_offset(struct drm_encoder *encoder); 1085extern int nv04_dac_output_offset(struct drm_encoder *encoder);
1036extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); 1086extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
1037 1087
@@ -1049,9 +1099,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1049 1099
1050/* nv17_tv.c */ 1100/* nv17_tv.c */
1051extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); 1101extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1052extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
1053 struct drm_connector *connector,
1054 uint32_t pin_mask);
1055 1102
1056/* nv04_display.c */ 1103/* nv04_display.c */
1057extern int nv04_display_create(struct drm_device *); 1104extern int nv04_display_create(struct drm_device *);
@@ -1290,14 +1337,14 @@ nv_two_reg_pll(struct drm_device *dev)
1290 return false; 1337 return false;
1291} 1338}
1292 1339
1293#define NV50_NVSW 0x0000506e 1340#define NV_SW 0x0000506e
1294#define NV50_NVSW_DMA_SEMAPHORE 0x00000060 1341#define NV_SW_DMA_SEMAPHORE 0x00000060
1295#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064 1342#define NV_SW_SEMAPHORE_OFFSET 0x00000064
1296#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068 1343#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
1297#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c 1344#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
1298#define NV50_NVSW_DMA_VBLSEM 0x0000018c 1345#define NV_SW_DMA_VBLSEM 0x0000018c
1299#define NV50_NVSW_VBLSEM_OFFSET 0x00000400 1346#define NV_SW_VBLSEM_OFFSET 0x00000400
1300#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404 1347#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
1301#define NV50_NVSW_VBLSEM_RELEASE 0x00000408 1348#define NV_SW_VBLSEM_RELEASE 0x00000408
1302 1349
1303#endif /* __NOUVEAU_DRV_H__ */ 1350#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 84af25c238b6..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -64,8 +64,7 @@ nouveau_fbcon_sync(struct fb_info *info)
64 return 0; 64 return 0;
65 65
66 if (RING_SPACE(chan, 4)) { 66 if (RING_SPACE(chan, 4)) {
67 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 67 nouveau_fbcon_gpu_lockup(info);
68 info->flags |= FBINFO_HWACCEL_DISABLED;
69 return 0; 68 return 0;
70 } 69 }
71 70
@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
86 } 85 }
87 86
88 if (ret) { 87 if (ret) {
89 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 88 nouveau_fbcon_gpu_lockup(info);
90 info->flags |= FBINFO_HWACCEL_DISABLED;
91 return 0; 89 return 0;
92 } 90 }
93 91
@@ -109,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
109 .fb_setcmap = drm_fb_helper_setcmap, 107 .fb_setcmap = drm_fb_helper_setcmap,
110}; 108};
111 109
110static struct fb_ops nv04_fbcon_ops = {
111 .owner = THIS_MODULE,
112 .fb_check_var = drm_fb_helper_check_var,
113 .fb_set_par = drm_fb_helper_set_par,
114 .fb_setcolreg = drm_fb_helper_setcolreg,
115 .fb_fillrect = nv04_fbcon_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122};
123
124static struct fb_ops nv50_fbcon_ops = {
125 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = drm_fb_helper_set_par,
128 .fb_setcolreg = drm_fb_helper_setcolreg,
129 .fb_fillrect = nv50_fbcon_fillrect,
130 .fb_copyarea = nv50_fbcon_copyarea,
131 .fb_imageblit = nv50_fbcon_imageblit,
132 .fb_sync = nouveau_fbcon_sync,
133 .fb_pan_display = drm_fb_helper_pan_display,
134 .fb_blank = drm_fb_helper_blank,
135 .fb_setcmap = drm_fb_helper_setcmap,
136};
137
112static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 138static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
113 u16 blue, int regno) 139 u16 blue, int regno)
114{ 140{
@@ -212,11 +238,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
212 238
213 mode_cmd.bpp = surface_bpp; 239 mode_cmd.bpp = surface_bpp;
214 mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); 240 mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
215 mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256); 241 mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
216 mode_cmd.depth = surface_depth; 242 mode_cmd.depth = surface_depth;
217 243
218 size = mode_cmd.pitch * mode_cmd.height; 244 size = mode_cmd.pitch * mode_cmd.height;
219 size = ALIGN(size, PAGE_SIZE); 245 size = roundup(size, PAGE_SIZE);
220 246
221 ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM, 247 ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
222 0, 0x0000, false, true, &nvbo); 248 0, 0x0000, false, true, &nvbo);
@@ -269,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
269 dev_priv->fbdev_info = info; 295 dev_priv->fbdev_info = info;
270 296
271 strcpy(info->fix.id, "nouveaufb"); 297 strcpy(info->fix.id, "nouveaufb");
272 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 298 if (nouveau_nofbaccel)
273 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; 299 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
300 else
301 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
302 FBINFO_HWACCEL_FILLRECT |
303 FBINFO_HWACCEL_IMAGEBLIT;
274 info->fbops = &nouveau_fbcon_ops; 304 info->fbops = &nouveau_fbcon_ops;
275 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 305 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
276 dev_priv->vm_vram_base; 306 dev_priv->vm_vram_base;
@@ -318,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
318 par->nouveau_fb = nouveau_fb; 348 par->nouveau_fb = nouveau_fb;
319 par->dev = dev; 349 par->dev = dev;
320 350
321 if (dev_priv->channel) { 351 if (dev_priv->channel && !nouveau_nofbaccel) {
322 switch (dev_priv->card_type) { 352 switch (dev_priv->card_type) {
323 case NV_50: 353 case NV_50:
324 nv50_fbcon_accel_init(info); 354 nv50_fbcon_accel_init(info);
355 info->fbops = &nv50_fbcon_ops;
325 break; 356 break;
326 default: 357 default:
327 nv04_fbcon_accel_init(info); 358 nv04_fbcon_accel_init(info);
359 info->fbops = &nv04_fbcon_ops;
328 break; 360 break;
329 }; 361 };
330 } 362 }
@@ -380,3 +412,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
380 412
381 return 0; 413 return 0;
382} 414}
415
416void nouveau_fbcon_gpu_lockup(struct fb_info *info)
417{
418 struct nouveau_fbcon_par *par = info->par;
419 struct drm_device *dev = par->dev;
420
421 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
422 info->flags |= FBINFO_HWACCEL_DISABLED;
423}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 8531140fedbc..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,8 +40,15 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void); 40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev); 41void nouveau_fbcon_zfill(struct drm_device *dev);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
43int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
44int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
45 51
52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
46#endif /* __NV50_FBCON_H__ */ 53#endif /* __NV50_FBCON_H__ */
47 54
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index dacac9a0842a..faddf53ff9ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
142 list_add_tail(&fence->entry, &chan->fence.pending); 142 list_add_tail(&fence->entry, &chan->fence.pending);
143 spin_unlock_irqrestore(&chan->fence.lock, flags); 143 spin_unlock_irqrestore(&chan->fence.lock, flags);
144 144
145 BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1); 145 BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
146 OUT_RING(chan, fence->sequence); 146 OUT_RING(chan, fence->sequence);
147 FIRE_RING(chan); 147 FIRE_RING(chan);
148 148
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 18fd8ac9fca7..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
220} 220}
221 221
222struct validate_op { 222struct validate_op {
223 struct nouveau_fence *fence;
224 struct list_head vram_list; 223 struct list_head vram_list;
225 struct list_head gart_list; 224 struct list_head gart_list;
226 struct list_head both_list; 225 struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
252} 251}
253 252
254static void 253static void
255validate_fini(struct validate_op *op, bool success) 254validate_fini(struct validate_op *op, struct nouveau_fence* fence)
256{ 255{
257 struct nouveau_fence *fence = op->fence; 256 validate_fini_list(&op->vram_list, fence);
258 257 validate_fini_list(&op->gart_list, fence);
259 if (unlikely(!success)) 258 validate_fini_list(&op->both_list, fence);
260 op->fence = NULL;
261
262 validate_fini_list(&op->vram_list, op->fence);
263 validate_fini_list(&op->gart_list, op->fence);
264 validate_fini_list(&op->both_list, op->fence);
265 nouveau_fence_unref((void *)&fence);
266} 259}
267 260
268static int 261static int
@@ -328,6 +321,7 @@ retry:
328 else { 321 else {
329 NV_ERROR(dev, "invalid valid domains: 0x%08x\n", 322 NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
330 b->valid_domains); 323 b->valid_domains);
324 list_add_tail(&nvbo->entry, &op->both_list);
331 validate_fini(op, NULL); 325 validate_fini(op, NULL);
332 return -EINVAL; 326 return -EINVAL;
333 } 327 }
@@ -420,10 +414,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
420 INIT_LIST_HEAD(&op->gart_list); 414 INIT_LIST_HEAD(&op->gart_list);
421 INIT_LIST_HEAD(&op->both_list); 415 INIT_LIST_HEAD(&op->both_list);
422 416
423 ret = nouveau_fence_new(chan, &op->fence, false);
424 if (ret)
425 return ret;
426
427 if (nr_buffers == 0) 417 if (nr_buffers == 0)
428 return 0; 418 return 0;
429 419
@@ -477,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
477static int 467static int
478nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, 468nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
479 struct drm_nouveau_gem_pushbuf_bo *bo, 469 struct drm_nouveau_gem_pushbuf_bo *bo,
480 int nr_relocs, uint64_t ptr_relocs, 470 unsigned nr_relocs, uint64_t ptr_relocs,
481 int nr_dwords, int first_dword, 471 unsigned nr_dwords, unsigned first_dword,
482 uint32_t *pushbuf, bool is_iomem) 472 uint32_t *pushbuf, bool is_iomem)
483{ 473{
484 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 474 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
485 struct drm_device *dev = chan->dev; 475 struct drm_device *dev = chan->dev;
486 int ret = 0, i; 476 int ret = 0;
477 unsigned i;
487 478
488 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); 479 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
489 if (IS_ERR(reloc)) 480 if (IS_ERR(reloc))
@@ -541,6 +532,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
541 struct drm_nouveau_gem_pushbuf_bo *bo = NULL; 532 struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
542 struct nouveau_channel *chan; 533 struct nouveau_channel *chan;
543 struct validate_op op; 534 struct validate_op op;
535 struct nouveau_fence* fence = 0;
544 uint32_t *pushbuf = NULL; 536 uint32_t *pushbuf = NULL;
545 int ret = 0, do_reloc = 0, i; 537 int ret = 0, do_reloc = 0, i;
546 538
@@ -597,7 +589,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
597 589
598 OUT_RINGp(chan, pushbuf, req->nr_dwords); 590 OUT_RINGp(chan, pushbuf, req->nr_dwords);
599 591
600 ret = nouveau_fence_emit(op.fence); 592 ret = nouveau_fence_new(chan, &fence, true);
601 if (ret) { 593 if (ret) {
602 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 594 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
603 WIND_RING(chan); 595 WIND_RING(chan);
@@ -605,7 +597,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
605 } 597 }
606 598
607 if (nouveau_gem_pushbuf_sync(chan)) { 599 if (nouveau_gem_pushbuf_sync(chan)) {
608 ret = nouveau_fence_wait(op.fence, NULL, false, false); 600 ret = nouveau_fence_wait(fence, NULL, false, false);
609 if (ret) { 601 if (ret) {
610 for (i = 0; i < req->nr_dwords; i++) 602 for (i = 0; i < req->nr_dwords; i++)
611 NV_ERROR(dev, "0x%08x\n", pushbuf[i]); 603 NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -614,7 +606,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
614 } 606 }
615 607
616out: 608out:
617 validate_fini(&op, ret == 0); 609 validate_fini(&op, fence);
610 nouveau_fence_unref((void**)&fence);
618 mutex_unlock(&dev->struct_mutex); 611 mutex_unlock(&dev->struct_mutex);
619 kfree(pushbuf); 612 kfree(pushbuf);
620 kfree(bo); 613 kfree(bo);
@@ -634,6 +627,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
634 struct drm_gem_object *gem; 627 struct drm_gem_object *gem;
635 struct nouveau_bo *pbbo; 628 struct nouveau_bo *pbbo;
636 struct validate_op op; 629 struct validate_op op;
630 struct nouveau_fence* fence = 0;
637 int i, ret = 0, do_reloc = 0; 631 int i, ret = 0, do_reloc = 0;
638 632
639 NOUVEAU_CHECK_INITIALISED_WITH_RETURN; 633 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -675,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
675 } 669 }
676 pbbo = nouveau_gem_object(gem); 670 pbbo = nouveau_gem_object(gem);
677 671
672 if ((req->offset & 3) || req->nr_dwords < 2 ||
673 (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
674 (unsigned long)req->nr_dwords >
675 ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
676 NV_ERROR(dev, "pb call misaligned or out of bounds: "
677 "%d + %d * 4 > %ld\n",
678 req->offset, req->nr_dwords, pbbo->bo.mem.size);
679 ret = -EINVAL;
680 drm_gem_object_unreference(gem);
681 goto out;
682 }
683
678 ret = ttm_bo_reserve(&pbbo->bo, false, false, true, 684 ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
679 chan->fence.sequence); 685 chan->fence.sequence);
680 if (ret) { 686 if (ret) {
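The check added above rejects pushbuf calls whose offset is misaligned or whose dword count runs past the end of the buffer object. A self-contained sketch of the same arithmetic, with illustrative sizes, follows; pb_call_ok() is a hypothetical helper, not part of the driver.

#include <stdio.h>

static int pb_call_ok(unsigned long bo_size, unsigned offset, unsigned nr_dwords)
{
	if ((offset & 3) || nr_dwords < 2)
		return 0;                         /* misaligned or too short */
	if ((unsigned long)offset > bo_size)
		return 0;                         /* starts past the buffer */
	if ((unsigned long)nr_dwords > ((bo_size - offset) >> 2))
		return 0;                         /* runs off the end */
	return 1;
}

int main(void)
{
	printf("%d\n", pb_call_ok(4096, 0, 1024));   /* 1: exactly fits */
	printf("%d\n", pb_call_ok(4096, 4, 1024));   /* 0: one dword too many */
	printf("%d\n", pb_call_ok(4096, 2, 8));      /* 0: misaligned offset */
	return 0;
}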
@@ -772,7 +778,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
772 OUT_RING(chan, 0); 778 OUT_RING(chan, 0);
773 } 779 }
774 780
775 ret = nouveau_fence_emit(op.fence); 781 ret = nouveau_fence_new(chan, &fence, true);
776 if (ret) { 782 if (ret) {
777 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 783 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
778 WIND_RING(chan); 784 WIND_RING(chan);
@@ -780,7 +786,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
780 } 786 }
781 787
782out: 788out:
783 validate_fini(&op, ret == 0); 789 validate_fini(&op, fence);
790 nouveau_fence_unref((void**)&fence);
784 mutex_unlock(&dev->struct_mutex); 791 mutex_unlock(&dev->struct_mutex);
785 kfree(bo); 792 kfree(bo);
786 793
@@ -918,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
918 } 925 }
919 926
920 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { 927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
928 spin_lock(&nvbo->bo.lock);
921 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); 929 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
930 spin_unlock(&nvbo->bo.lock);
922 } else { 931 } else {
923 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); 932 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
924 if (ret == 0) 933 if (ret == 0)
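
The two pushbuf ioctls above stop keeping a fence inside struct validate_op: each path allocates its own fence with nouveau_fence_new(), passes it to validate_fini() so the validated buffers are released against it, and then drops its local reference. A minimal sketch of that ordering, using only the helpers visible in the hunks (pushbuf_fence_pattern is a made-up name, and this is not a compilable unit on its own):

static int pushbuf_fence_pattern(struct nouveau_channel *chan,
                                 struct validate_op *op)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret) {
                NV_ERROR(chan->dev, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
        }

        /* On success the BOs are released against the new fence; on failure
         * fence stays NULL and validate_fini() simply unreserves them. */
        validate_fini(op, fence);
        nouveau_fence_unref((void **)&fence);
        return ret;
}
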
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
97 } 97 }
98 98
99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); 99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
100 if (!pgraph->ctxprog) { 100 if (!pgraph->ctxvals) {
101 NV_ERROR(dev, "OOM copying ctxprog\n"); 101 NV_ERROR(dev, "OOM copying ctxvals\n");
102 release_firmware(fw); 102 release_firmware(fw);
103 nouveau_grctx_fini(dev); 103 nouveau_grctx_fini(dev);
104 return -ENOMEM; 104 return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 370c72c968d1..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
211 get + 4); 211 get + 4);
212 } 212 }
213 213
214 if (status & NV_PFIFO_INTR_SEMAPHORE) {
215 uint32_t sem;
216
217 status &= ~NV_PFIFO_INTR_SEMAPHORE;
218 nv_wr32(dev, NV03_PFIFO_INTR_0,
219 NV_PFIFO_INTR_SEMAPHORE);
220
221 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
222 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
223
224 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
225 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
226 }
227
214 if (status) { 228 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 229 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid); 230 status, chid);
@@ -483,6 +497,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
483 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 497 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
484 if (nouveau_pgraph_intr_swmthd(dev, &trap)) 498 if (nouveau_pgraph_intr_swmthd(dev, &trap))
485 unhandled = 1; 499 unhandled = 1;
500 } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
501 uint32_t v = nv_rd32(dev, 0x402000);
502 nv_wr32(dev, 0x402000, v);
503
504 /* dump the error anyway for now: it's useful for
505 Gallium development */
506 unhandled = 1;
486 } else { 507 } else {
487 unhandled = 1; 508 unhandled = 1;
488 } 509 }
@@ -559,85 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
559static void 580static void
560nv50_pgraph_irq_handler(struct drm_device *dev) 581nv50_pgraph_irq_handler(struct drm_device *dev)
561{ 582{
562 uint32_t status, nsource; 583 uint32_t status;
563 584
564 status = nv_rd32(dev, NV03_PGRAPH_INTR); 585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
565 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
566 587
567 if (status & 0x00000001) { 588 if (status & 0x00000001) {
568 nouveau_pgraph_intr_notify(dev, nsource); 589 nouveau_pgraph_intr_notify(dev, nsource);
569 status &= ~0x00000001; 590 status &= ~0x00000001;
570 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
571 } 592 }
572 593
573 if (status & 0x00000010) { 594 if (status & 0x00000010) {
574 nouveau_pgraph_intr_error(dev, nsource | 595 nouveau_pgraph_intr_error(dev, nsource |
575 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
576 597
577 status &= ~0x00000010; 598 status &= ~0x00000010;
578 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
579 } 600 }
580 601
581 if (status & 0x00001000) { 602 if (status & 0x00001000) {
582 nv_wr32(dev, 0x400500, 0x00000000); 603 nv_wr32(dev, 0x400500, 0x00000000);
583 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 604 nv_wr32(dev, NV03_PGRAPH_INTR,
584 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, 605 NV_PGRAPH_INTR_CONTEXT_SWITCH);
585 NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); 606 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
586 nv_wr32(dev, 0x400500, 0x00010001); 607 NV40_PGRAPH_INTR_EN) &
608 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
609 nv_wr32(dev, 0x400500, 0x00010001);
587 610
588 nv50_graph_context_switch(dev); 611 nv50_graph_context_switch(dev);
589 612
590 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
591 } 614 }
592 615
593 if (status & 0x00100000) { 616 if (status & 0x00100000) {
594 nouveau_pgraph_intr_error(dev, nsource | 617 nouveau_pgraph_intr_error(dev, nsource |
595 NV03_PGRAPH_NSOURCE_DATA_ERROR); 618 NV03_PGRAPH_NSOURCE_DATA_ERROR);
596 619
597 status &= ~0x00100000; 620 status &= ~0x00100000;
598 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
599 } 622 }
600 623
601 if (status & 0x00200000) { 624 if (status & 0x00200000) {
602 int r; 625 int r;
603 626
604 nouveau_pgraph_intr_error(dev, nsource | 627 nouveau_pgraph_intr_error(dev, nsource |
605 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); 628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
606 629
607 NV_ERROR(dev, "magic set 1:\n"); 630 NV_ERROR(dev, "magic set 1:\n");
608 for (r = 0x408900; r <= 0x408910; r += 4) 631 for (r = 0x408900; r <= 0x408910; r += 4)
609 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
610 nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); 633 nv_rd32(dev, r));
611 for (r = 0x408e08; r <= 0x408e24; r += 4) 634 nv_wr32(dev, 0x408900,
612 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 635 nv_rd32(dev, 0x408904) | 0xc0000000);
613 nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); 636 for (r = 0x408e08; r <= 0x408e24; r += 4)
614 637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
615 NV_ERROR(dev, "magic set 2:\n"); 638 nv_rd32(dev, r));
616 for (r = 0x409900; r <= 0x409910; r += 4) 639 nv_wr32(dev, 0x408e08,
617 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 640 nv_rd32(dev, 0x408e08) | 0xc0000000);
618 nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); 641
619 for (r = 0x409e08; r <= 0x409e24; r += 4) 642 NV_ERROR(dev, "magic set 2:\n");
620 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 643 for (r = 0x409900; r <= 0x409910; r += 4)
621 nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); 644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
622 645 nv_rd32(dev, r));
623 status &= ~0x00200000; 646 nv_wr32(dev, 0x409900,
624 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); 647 nv_rd32(dev, 0x409904) | 0xc0000000);
625 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 648 for (r = 0x409e08; r <= 0x409e24; r += 4)
626 } 649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 }
627 658
628 if (status) { 659 if (status) {
629 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); 660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
630 nv_wr32(dev, NV03_PGRAPH_INTR, status); 661 status);
631 } 662 nv_wr32(dev, NV03_PGRAPH_INTR, status);
663 }
632 664
633 { 665 {
634 const int isb = (1 << 16) | (1 << 0); 666 const int isb = (1 << 16) | (1 << 0);
635 667
636 if ((nv_rd32(dev, 0x400500) & isb) != isb) 668 if ((nv_rd32(dev, 0x400500) & isb) != isb)
637 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); 669 nv_wr32(dev, 0x400500,
670 nv_rd32(dev, 0x400500) | isb);
671 }
638 } 672 }
639 673
640 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
641} 676}
642 677
643static void 678static void
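
nv50_pgraph_irq_handler() above is reworked from a single read of NV03_PGRAPH_INTR into a drain loop: status is re-read until it comes back zero, and each recognised source is acknowledged as it is handled. The skeleton of that loop, reduced to the notify bit and the catch-all (pgraph_drain_irqs is a hypothetical name; the register handling follows the hunk):

static void pgraph_drain_irqs(struct drm_device *dev)
{
        uint32_t status;

        while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
                uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

                if (status & 0x00000001) {              /* NOTIFY */
                        nouveau_pgraph_intr_notify(dev, nsource);
                        status &= ~0x00000001;
                        nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
                }

                /* ... remaining sources are handled and acked the same way ... */

                if (status) {                           /* anything left over */
                        NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
                        nv_wr32(dev, NV03_PGRAPH_INTR, status);
                }
        }
}
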
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5158a12f7844..8f3a12f614ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
192} 192}
193 193
194/* 194/*
195 * NV10-NV40 tiling helpers
196 */
197
198static void
199nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
200 uint32_t size, uint32_t pitch)
201{
202 struct drm_nouveau_private *dev_priv = dev->dev_private;
203 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
204 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
205 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
206 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
207
208 tile->addr = addr;
209 tile->size = size;
210 tile->used = !!pitch;
211 nouveau_fence_unref((void **)&tile->fence);
212
213 if (!pfifo->cache_flush(dev))
214 return;
215
216 pfifo->reassign(dev, false);
217 pfifo->cache_flush(dev);
218 pfifo->cache_pull(dev, false);
219
220 nouveau_wait_for_idle(dev);
221
222 pgraph->set_region_tiling(dev, i, addr, size, pitch);
223 pfb->set_region_tiling(dev, i, addr, size, pitch);
224
225 pfifo->cache_pull(dev, true);
226 pfifo->reassign(dev, true);
227}
228
229struct nouveau_tile_reg *
230nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
231 uint32_t pitch)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
235 struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
236 int i;
237
238 spin_lock(&dev_priv->tile.lock);
239
240 for (i = 0; i < pfb->num_tiles; i++) {
241 if (tile[i].used)
242 /* Tile region in use. */
243 continue;
244
245 if (tile[i].fence &&
246 !nouveau_fence_signalled(tile[i].fence, NULL))
247 /* Pending tile region. */
248 continue;
249
250 if (max(tile[i].addr, addr) <
251 min(tile[i].addr + tile[i].size, addr + size))
252 /* Kill an intersecting tile region. */
253 nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
254
255 if (pitch && !found) {
256 /* Free tile region. */
257 nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
258 found = &tile[i];
259 }
260 }
261
262 spin_unlock(&dev_priv->tile.lock);
263
264 return found;
265}
266
267void
268nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
269 struct nouveau_fence *fence)
270{
271 if (fence) {
272 /* Mark it as pending. */
273 tile->fence = fence;
274 nouveau_fence_ref(fence);
275 }
276
277 tile->used = false;
278}
279
280/*
195 * NV50 VM helpers 281 * NV50 VM helpers
196 */ 282 */
197int 283int
@@ -297,9 +383,8 @@ void nouveau_mem_close(struct drm_device *dev)
297{ 383{
298 struct drm_nouveau_private *dev_priv = dev->dev_private; 384 struct drm_nouveau_private *dev_priv = dev->dev_private;
299 385
300 if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type) 386 nouveau_bo_unpin(dev_priv->vga_ram);
301 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0); 387 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
302 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
303 388
304 ttm_bo_device_release(&dev_priv->ttm.bdev); 389 ttm_bo_device_release(&dev_priv->ttm.bdev);
305 390
@@ -513,6 +598,7 @@ nouveau_mem_init(struct drm_device *dev)
513 598
514 INIT_LIST_HEAD(&dev_priv->ttm.bo_list); 599 INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
515 spin_lock_init(&dev_priv->ttm.bo_list_lock); 600 spin_lock_init(&dev_priv->ttm.bo_list_lock);
601 spin_lock_init(&dev_priv->tile.lock);
516 602
517 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); 603 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
518 604
@@ -535,6 +621,15 @@ nouveau_mem_init(struct drm_device *dev)
535 return ret; 621 return ret;
536 } 622 }
537 623
624 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
625 0, 0, true, true, &dev_priv->vga_ram);
626 if (ret == 0)
627 ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
628 if (ret) {
629 NV_WARN(dev, "failed to reserve VGA memory\n");
630 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
631 }
632
538 /* GART */ 633 /* GART */
539#if !defined(__powerpc__) && !defined(__ia64__) 634#if !defined(__powerpc__) && !defined(__ia64__)
540 if (drm_device_is_agp(dev) && dev->agp) { 635 if (drm_device_is_agp(dev) && dev->agp) {
@@ -566,6 +661,7 @@ nouveau_mem_init(struct drm_device *dev)
566 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), 661 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
567 drm_get_resource_len(dev, 1), 662 drm_get_resource_len(dev, 1),
568 DRM_MTRR_WC); 663 DRM_MTRR_WC);
664
569 return 0; 665 return 0;
570} 666}
571 667
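
The NV10-NV40 tiling helpers added above follow an allocate/expire protocol: nv10_mem_set_tiling() hands out a free, non-pending region (tearing down any intersecting one first), and nv10_mem_expire_tiling() marks it unused, optionally deferring reuse until a fence signals. A sketch of the caller side (example_tile_usage is an illustrative name, not a driver function):

static void example_tile_usage(struct drm_device *dev, uint32_t addr,
                               uint32_t size, uint32_t pitch,
                               struct nouveau_fence *fence)
{
        struct nouveau_tile_reg *tile;

        /* NULL if every region is still in use or fence-pending. */
        tile = nv10_mem_set_tiling(dev, addr, size, pitch);
        if (!tile)
                return;

        /* ... submit work that relies on the tiled region ... */

        /* Marked unused now, but not eligible for reuse until 'fence'
         * signals (a NULL fence allows immediate reuse). */
        nv10_mem_expire_tiling(dev, tile, fence);
}
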
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{ 34{
35 struct drm_device *dev = chan->dev; 35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL; 36 struct nouveau_bo *ntfy = NULL;
37 uint32_t flags;
37 int ret; 38 int ret;
38 39
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? 40 if (nouveau_vram_notify)
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, 41 flags = TTM_PL_FLAG_VRAM;
42 else
43 flags = TTM_PL_FLAG_TT;
44
45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
41 0, 0x0000, false, true, &ntfy); 46 0, 0x0000, false, true, &ntfy);
42 if (ret) 47 if (ret)
43 return ret; 48 return ret;
44 49
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); 50 ret = nouveau_bo_pin(ntfy, flags);
46 if (ret) 51 if (ret)
47 goto out_err; 52 goto out_err;
48 53
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
128 target = NV_DMA_TARGET_PCI; 133 target = NV_DMA_TARGET_PCI;
129 } else { 134 } else {
130 target = NV_DMA_TARGET_AGP; 135 target = NV_DMA_TARGET_AGP;
136 if (dev_priv->card_type >= NV_50)
137 offset += dev_priv->vm_gart_base;
131 } 138 }
132 } else { 139 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 140 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 93379bb81bea..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -881,15 +881,16 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
881 return 0; 881 return 0;
882} 882}
883 883
884static int 884int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret) 886 struct nouveau_gpuobj **gpuobj_ret)
887{ 887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj; 889 struct nouveau_gpuobj *gpuobj;
890 890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) 891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL; 892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
893 894
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj) 896 if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index fa1b0e7165b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
99 * the card will hang early on in the X init process. 99 * the card will hang early on in the X init process.
100 */ 100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13) 101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
102#define NV40_PMC_BACKLIGHT 0x000015f0 103#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700 105#define NV40_PMC_1700 0x00001700
@@ -349,19 +350,19 @@
349#define NV04_PGRAPH_BLEND 0x00400824 350#define NV04_PGRAPH_BLEND 0x00400824
350#define NV04_PGRAPH_STORED_FMT 0x00400830 351#define NV04_PGRAPH_STORED_FMT 0x00400830
351#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 352#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
352#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) 353#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
353#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) 354#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
354#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) 355#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
355#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) 356#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
356#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) 357#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
357#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) 358#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
358#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 359#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
359#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) 360#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
360#define NV04_PGRAPH_U_RAM 0x00400D00 361#define NV04_PGRAPH_U_RAM 0x00400D00
361#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) 362#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
362#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) 363#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
363#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) 364#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
364#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) 365#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
365#define NV04_PGRAPH_V_RAM 0x00400D40 366#define NV04_PGRAPH_V_RAM 0x00400D40
366#define NV04_PGRAPH_W_RAM 0x00400D80 367#define NV04_PGRAPH_W_RAM 0x00400D80
367#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 368#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
54nouveau_sgdma_clear(struct ttm_backend *be) 54nouveau_sgdma_clear(struct ttm_backend *be)
55{ 55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev; 57 struct drm_device *dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60 58
61 if (nvbe && nvbe->pages) { 59 if (nvbe && nvbe->pages) {
60 dev = nvbe->dev;
61 NV_DEBUG(dev, "\n");
62
62 if (nvbe->bound) 63 if (nvbe->bound)
63 be->func->unbind(be); 64 be->func->unbind(be);
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e76ec2d207a9..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
76 engine->fifo.disable = nv04_fifo_disable; 76 engine->fifo.disable = nv04_fifo_disable;
77 engine->fifo.enable = nv04_fifo_enable; 77 engine->fifo.enable = nv04_fifo_enable;
78 engine->fifo.reassign = nv04_fifo_reassign; 78 engine->fifo.reassign = nv04_fifo_reassign;
79 engine->fifo.cache_flush = nv04_fifo_cache_flush;
80 engine->fifo.cache_pull = nv04_fifo_cache_pull;
79 engine->fifo.channel_id = nv04_fifo_channel_id; 81 engine->fifo.channel_id = nv04_fifo_channel_id;
80 engine->fifo.create_context = nv04_fifo_create_context; 82 engine->fifo.create_context = nv04_fifo_create_context;
81 engine->fifo.destroy_context = nv04_fifo_destroy_context; 83 engine->fifo.destroy_context = nv04_fifo_destroy_context;
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
100 engine->timer.takedown = nv04_timer_takedown; 102 engine->timer.takedown = nv04_timer_takedown;
101 engine->fb.init = nv10_fb_init; 103 engine->fb.init = nv10_fb_init;
102 engine->fb.takedown = nv10_fb_takedown; 104 engine->fb.takedown = nv10_fb_takedown;
105 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
103 engine->graph.grclass = nv10_graph_grclass; 106 engine->graph.grclass = nv10_graph_grclass;
104 engine->graph.init = nv10_graph_init; 107 engine->graph.init = nv10_graph_init;
105 engine->graph.takedown = nv10_graph_takedown; 108 engine->graph.takedown = nv10_graph_takedown;
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
109 engine->graph.fifo_access = nv04_graph_fifo_access; 112 engine->graph.fifo_access = nv04_graph_fifo_access;
110 engine->graph.load_context = nv10_graph_load_context; 113 engine->graph.load_context = nv10_graph_load_context;
111 engine->graph.unload_context = nv10_graph_unload_context; 114 engine->graph.unload_context = nv10_graph_unload_context;
115 engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
112 engine->fifo.channels = 32; 116 engine->fifo.channels = 32;
113 engine->fifo.init = nv10_fifo_init; 117 engine->fifo.init = nv10_fifo_init;
114 engine->fifo.takedown = nouveau_stub_takedown; 118 engine->fifo.takedown = nouveau_stub_takedown;
115 engine->fifo.disable = nv04_fifo_disable; 119 engine->fifo.disable = nv04_fifo_disable;
116 engine->fifo.enable = nv04_fifo_enable; 120 engine->fifo.enable = nv04_fifo_enable;
117 engine->fifo.reassign = nv04_fifo_reassign; 121 engine->fifo.reassign = nv04_fifo_reassign;
122 engine->fifo.cache_flush = nv04_fifo_cache_flush;
123 engine->fifo.cache_pull = nv04_fifo_cache_pull;
118 engine->fifo.channel_id = nv10_fifo_channel_id; 124 engine->fifo.channel_id = nv10_fifo_channel_id;
119 engine->fifo.create_context = nv10_fifo_create_context; 125 engine->fifo.create_context = nv10_fifo_create_context;
120 engine->fifo.destroy_context = nv10_fifo_destroy_context; 126 engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
139 engine->timer.takedown = nv04_timer_takedown; 145 engine->timer.takedown = nv04_timer_takedown;
140 engine->fb.init = nv10_fb_init; 146 engine->fb.init = nv10_fb_init;
141 engine->fb.takedown = nv10_fb_takedown; 147 engine->fb.takedown = nv10_fb_takedown;
148 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
142 engine->graph.grclass = nv20_graph_grclass; 149 engine->graph.grclass = nv20_graph_grclass;
143 engine->graph.init = nv20_graph_init; 150 engine->graph.init = nv20_graph_init;
144 engine->graph.takedown = nv20_graph_takedown; 151 engine->graph.takedown = nv20_graph_takedown;
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
148 engine->graph.fifo_access = nv04_graph_fifo_access; 155 engine->graph.fifo_access = nv04_graph_fifo_access;
149 engine->graph.load_context = nv20_graph_load_context; 156 engine->graph.load_context = nv20_graph_load_context;
150 engine->graph.unload_context = nv20_graph_unload_context; 157 engine->graph.unload_context = nv20_graph_unload_context;
158 engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
151 engine->fifo.channels = 32; 159 engine->fifo.channels = 32;
152 engine->fifo.init = nv10_fifo_init; 160 engine->fifo.init = nv10_fifo_init;
153 engine->fifo.takedown = nouveau_stub_takedown; 161 engine->fifo.takedown = nouveau_stub_takedown;
154 engine->fifo.disable = nv04_fifo_disable; 162 engine->fifo.disable = nv04_fifo_disable;
155 engine->fifo.enable = nv04_fifo_enable; 163 engine->fifo.enable = nv04_fifo_enable;
156 engine->fifo.reassign = nv04_fifo_reassign; 164 engine->fifo.reassign = nv04_fifo_reassign;
165 engine->fifo.cache_flush = nv04_fifo_cache_flush;
166 engine->fifo.cache_pull = nv04_fifo_cache_pull;
157 engine->fifo.channel_id = nv10_fifo_channel_id; 167 engine->fifo.channel_id = nv10_fifo_channel_id;
158 engine->fifo.create_context = nv10_fifo_create_context; 168 engine->fifo.create_context = nv10_fifo_create_context;
159 engine->fifo.destroy_context = nv10_fifo_destroy_context; 169 engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
178 engine->timer.takedown = nv04_timer_takedown; 188 engine->timer.takedown = nv04_timer_takedown;
179 engine->fb.init = nv10_fb_init; 189 engine->fb.init = nv10_fb_init;
180 engine->fb.takedown = nv10_fb_takedown; 190 engine->fb.takedown = nv10_fb_takedown;
191 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
181 engine->graph.grclass = nv30_graph_grclass; 192 engine->graph.grclass = nv30_graph_grclass;
182 engine->graph.init = nv30_graph_init; 193 engine->graph.init = nv30_graph_init;
183 engine->graph.takedown = nv20_graph_takedown; 194 engine->graph.takedown = nv20_graph_takedown;
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
187 engine->graph.destroy_context = nv20_graph_destroy_context; 198 engine->graph.destroy_context = nv20_graph_destroy_context;
188 engine->graph.load_context = nv20_graph_load_context; 199 engine->graph.load_context = nv20_graph_load_context;
189 engine->graph.unload_context = nv20_graph_unload_context; 200 engine->graph.unload_context = nv20_graph_unload_context;
201 engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
190 engine->fifo.channels = 32; 202 engine->fifo.channels = 32;
191 engine->fifo.init = nv10_fifo_init; 203 engine->fifo.init = nv10_fifo_init;
192 engine->fifo.takedown = nouveau_stub_takedown; 204 engine->fifo.takedown = nouveau_stub_takedown;
193 engine->fifo.disable = nv04_fifo_disable; 205 engine->fifo.disable = nv04_fifo_disable;
194 engine->fifo.enable = nv04_fifo_enable; 206 engine->fifo.enable = nv04_fifo_enable;
195 engine->fifo.reassign = nv04_fifo_reassign; 207 engine->fifo.reassign = nv04_fifo_reassign;
208 engine->fifo.cache_flush = nv04_fifo_cache_flush;
209 engine->fifo.cache_pull = nv04_fifo_cache_pull;
196 engine->fifo.channel_id = nv10_fifo_channel_id; 210 engine->fifo.channel_id = nv10_fifo_channel_id;
197 engine->fifo.create_context = nv10_fifo_create_context; 211 engine->fifo.create_context = nv10_fifo_create_context;
198 engine->fifo.destroy_context = nv10_fifo_destroy_context; 212 engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
218 engine->timer.takedown = nv04_timer_takedown; 232 engine->timer.takedown = nv04_timer_takedown;
219 engine->fb.init = nv40_fb_init; 233 engine->fb.init = nv40_fb_init;
220 engine->fb.takedown = nv40_fb_takedown; 234 engine->fb.takedown = nv40_fb_takedown;
235 engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
221 engine->graph.grclass = nv40_graph_grclass; 236 engine->graph.grclass = nv40_graph_grclass;
222 engine->graph.init = nv40_graph_init; 237 engine->graph.init = nv40_graph_init;
223 engine->graph.takedown = nv40_graph_takedown; 238 engine->graph.takedown = nv40_graph_takedown;
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
227 engine->graph.destroy_context = nv40_graph_destroy_context; 242 engine->graph.destroy_context = nv40_graph_destroy_context;
228 engine->graph.load_context = nv40_graph_load_context; 243 engine->graph.load_context = nv40_graph_load_context;
229 engine->graph.unload_context = nv40_graph_unload_context; 244 engine->graph.unload_context = nv40_graph_unload_context;
245 engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
230 engine->fifo.channels = 32; 246 engine->fifo.channels = 32;
231 engine->fifo.init = nv40_fifo_init; 247 engine->fifo.init = nv40_fifo_init;
232 engine->fifo.takedown = nouveau_stub_takedown; 248 engine->fifo.takedown = nouveau_stub_takedown;
233 engine->fifo.disable = nv04_fifo_disable; 249 engine->fifo.disable = nv04_fifo_disable;
234 engine->fifo.enable = nv04_fifo_enable; 250 engine->fifo.enable = nv04_fifo_enable;
235 engine->fifo.reassign = nv04_fifo_reassign; 251 engine->fifo.reassign = nv04_fifo_reassign;
252 engine->fifo.cache_flush = nv04_fifo_cache_flush;
253 engine->fifo.cache_pull = nv04_fifo_cache_pull;
236 engine->fifo.channel_id = nv10_fifo_channel_id; 254 engine->fifo.channel_id = nv10_fifo_channel_id;
237 engine->fifo.create_context = nv40_fifo_create_context; 255 engine->fifo.create_context = nv40_fifo_create_context;
238 engine->fifo.destroy_context = nv40_fifo_destroy_context; 256 engine->fifo.destroy_context = nv40_fifo_destroy_context;
@@ -292,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
292static unsigned int 310static unsigned int
293nouveau_vga_set_decode(void *priv, bool state) 311nouveau_vga_set_decode(void *priv, bool state)
294{ 312{
313 struct drm_device *dev = priv;
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315
316 if (dev_priv->chipset >= 0x40)
317 nv_wr32(dev, 0x88054, state);
318 else
319 nv_wr32(dev, 0x1854, state);
320
295 if (state) 321 if (state)
296 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 322 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
297 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 323 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -409,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
409 if (ret) 435 if (ret)
410 goto out_timer; 436 goto out_timer;
411 437
412 /* PGRAPH */ 438 if (nouveau_noaccel)
413 ret = engine->graph.init(dev); 439 engine->graph.accel_blocked = true;
414 if (ret) 440 else {
415 goto out_fb; 441 /* PGRAPH */
442 ret = engine->graph.init(dev);
443 if (ret)
444 goto out_fb;
416 445
417 /* PFIFO */ 446 /* PFIFO */
418 ret = engine->fifo.init(dev); 447 ret = engine->fifo.init(dev);
419 if (ret) 448 if (ret)
420 goto out_graph; 449 goto out_graph;
450 }
421 451
422 /* this call irq_preinstall, register irq handler and 452 /* this call irq_preinstall, register irq handler and
423 * call irq_postinstall 453 * call irq_postinstall
@@ -461,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
461out_irq: 491out_irq:
462 drm_irq_uninstall(dev); 492 drm_irq_uninstall(dev);
463out_fifo: 493out_fifo:
464 engine->fifo.takedown(dev); 494 if (!nouveau_noaccel)
495 engine->fifo.takedown(dev);
465out_graph: 496out_graph:
466 engine->graph.takedown(dev); 497 if (!nouveau_noaccel)
498 engine->graph.takedown(dev);
467out_fb: 499out_fb:
468 engine->fb.takedown(dev); 500 engine->fb.takedown(dev);
469out_timer: 501out_timer:
@@ -500,13 +532,16 @@ static void nouveau_card_takedown(struct drm_device *dev)
500 dev_priv->channel = NULL; 532 dev_priv->channel = NULL;
501 } 533 }
502 534
503 engine->fifo.takedown(dev); 535 if (!nouveau_noaccel) {
504 engine->graph.takedown(dev); 536 engine->fifo.takedown(dev);
537 engine->graph.takedown(dev);
538 }
505 engine->fb.takedown(dev); 539 engine->fb.takedown(dev);
506 engine->timer.takedown(dev); 540 engine->timer.takedown(dev);
507 engine->mc.takedown(dev); 541 engine->mc.takedown(dev);
508 542
509 mutex_lock(&dev->struct_mutex); 543 mutex_lock(&dev->struct_mutex);
544 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
510 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 545 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
511 mutex_unlock(&dev->struct_mutex); 546 mutex_unlock(&dev->struct_mutex);
512 nouveau_sgdma_takedown(dev); 547 nouveau_sgdma_takedown(dev);
@@ -624,7 +659,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
624 dev_priv->chipset = (reg0 & 0xff00000) >> 20; 659 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
625 /* NV04 or NV05 */ 660 /* NV04 or NV05 */
626 } else if ((reg0 & 0xff00fff0) == 0x20004000) { 661 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
627 dev_priv->chipset = 0x04; 662 if (reg0 & 0x00f00000)
663 dev_priv->chipset = 0x05;
664 else
665 dev_priv->chipset = 0x04;
628 } else 666 } else
629 dev_priv->chipset = 0xff; 667 dev_priv->chipset = 0xff;
630 668
@@ -704,8 +742,8 @@ static void nouveau_close(struct drm_device *dev)
704{ 742{
705 struct drm_nouveau_private *dev_priv = dev->dev_private; 743 struct drm_nouveau_private *dev_priv = dev->dev_private;
706 744
707 /* In the case of an error dev_priv may not be be allocated yet */ 745 /* In the case of an error dev_priv may not be allocated yet */
708 if (dev_priv && dev_priv->card_type) 746 if (dev_priv)
709 nouveau_card_takedown(dev); 747 nouveau_card_takedown(dev);
710} 748}
711 749
@@ -795,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
795 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 833 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
796 getparam->value = dev_priv->vm_vram_base; 834 getparam->value = dev_priv->vm_vram_base;
797 break; 835 break;
836 case NOUVEAU_GETPARAM_GRAPH_UNITS:
837 /* NV40 and NV50 versions are quite different, but register
838 * address is the same. User is supposed to know the card
839 * family anyway... */
840 if (dev_priv->chipset >= 0x40) {
841 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
842 break;
843 }
844 /* FALLTHRU */
798 default: 845 default:
799 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 846 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
800 return -EINVAL; 847 return -EINVAL;
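
Two small decode changes above are easy to miss: NV04 and NV05 both match (reg0 & 0xff00fff0) == 0x20004000 and are now told apart by bits 20-23 of reg0, and NOUVEAU_GETPARAM_GRAPH_UNITS only reads NV40_PMC_GRAPH_UNITS on chipset >= 0x40, falling through to -EINVAL otherwise. The chipset part in isolation (nv04_family_chipset is a hypothetical helper):

static int nv04_family_chipset(uint32_t reg0)
{
        /* Both NV04 and NV05 match this boot0 pattern... */
        if ((reg0 & 0xff00fff0) != 0x20004000)
                return 0xff;                    /* unknown, as in nouveau_load() */

        /* ...and bits 20-23 distinguish NV05 from NV04. */
        return (reg0 & 0x00f00000) ? 0x05 : 0x04;
}
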
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 187eb84e4da5..c385d50f041b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,45 +28,17 @@
28 28
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30 30
31static struct vm_operations_struct nouveau_ttm_vm_ops;
32static const struct vm_operations_struct *ttm_vm_ops;
33
34static int
35nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36{
37 struct ttm_buffer_object *bo = vma->vm_private_data;
38 int ret;
39
40 if (unlikely(bo == NULL))
41 return VM_FAULT_NOPAGE;
42
43 ret = ttm_vm_ops->fault(vma, vmf);
44 return ret;
45}
46
47int 31int
48nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) 32nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
49{ 33{
50 struct drm_file *file_priv = filp->private_data; 34 struct drm_file *file_priv = filp->private_data;
51 struct drm_nouveau_private *dev_priv = 35 struct drm_nouveau_private *dev_priv =
52 file_priv->minor->dev->dev_private; 36 file_priv->minor->dev->dev_private;
53 int ret;
54 37
55 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 38 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
56 return drm_mmap(filp, vma); 39 return drm_mmap(filp, vma);
57 40
58 ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); 41 return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
59 if (unlikely(ret != 0))
60 return ret;
61
62 if (unlikely(ttm_vm_ops == NULL)) {
63 ttm_vm_ops = vma->vm_ops;
64 nouveau_ttm_vm_ops = *ttm_vm_ops;
65 nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
66 }
67
68 vma->vm_ops = &nouveau_ttm_vm_ops;
69 return 0;
70} 42}
71 43
72static int 44static int
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d9f32879ba38..d0e038d28948 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -212,16 +212,15 @@ out:
212 return connector_status_disconnected; 212 return connector_status_disconnected;
213} 213}
214 214
215enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, 215uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
216 struct drm_connector *connector)
217{ 216{
218 struct drm_device *dev = encoder->dev; 217 struct drm_device *dev = encoder->dev;
219 struct drm_nouveau_private *dev_priv = dev->dev_private; 218 struct drm_nouveau_private *dev_priv = dev->dev_private;
220 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 219 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
221 uint32_t testval, regoffset = nv04_dac_output_offset(encoder); 220 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
222 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, 221 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
223 saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; 222 saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
224 int head, present = 0; 223 int head;
225 224
226#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) 225#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
227 if (dcb->type == OUTPUT_TV) { 226 if (dcb->type == OUTPUT_TV) {
@@ -287,13 +286,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
287 temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); 286 temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
288 msleep(5); 287 msleep(5);
289 288
290 temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); 289 sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
291
292 if (dcb->type == OUTPUT_TV)
293 present = (nv17_tv_detect(encoder, connector, temp)
294 == connector_status_connected);
295 else
296 present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
297 290
298 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); 291 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
299 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, 292 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -310,15 +303,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
310 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); 303 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
311 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); 304 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
312 305
313 if (present) { 306 return sample;
314 NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); 307}
308
309static enum drm_connector_status
310nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
311{
312 struct drm_device *dev = encoder->dev;
313 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
314 uint32_t sample = nv17_dac_sample_load(encoder);
315
316 if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
317 NV_INFO(dev, "Load detected on output %c\n",
318 '@' + ffs(dcb->or));
315 return connector_status_connected; 319 return connector_status_connected;
320 } else {
321 return connector_status_disconnected;
316 } 322 }
317
318 return connector_status_disconnected;
319} 323}
320 324
321
322static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, 325static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
323 struct drm_display_mode *mode, 326 struct drm_display_mode *mode,
324 struct drm_display_mode *adjusted_mode) 327 struct drm_display_mode *adjusted_mode)
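
The DAC change above splits load detection in two: nv17_dac_sample_load() performs the register sequence and returns the raw sample, while nv17_dac_detect() only interprets it. The interpretation step on its own (dac_status_from_sample is a made-up wrapper; the test matches the hunk):

static enum drm_connector_status
dac_status_from_sample(struct drm_device *dev, struct dcb_entry *dcb,
                       uint32_t sample)
{
        if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
                NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
                return connector_status_connected;
        }

        return connector_status_disconnected;
}
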
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 09a31071ee58..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
27#include "nouveau_dma.h" 27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h" 28#include "nouveau_fbcon.h"
29 29
30static void 30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{ 32{
33 struct nouveau_fbcon_par *par = info->par; 33 struct nouveau_fbcon_par *par = info->par;
@@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
39 return; 39 return;
40 40
41 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { 41 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
42 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 42 nouveau_fbcon_gpu_lockup(info);
43 info->flags |= FBINFO_HWACCEL_DISABLED;
44 } 43 }
45 44
46 if (info->flags & FBINFO_HWACCEL_DISABLED) { 45 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -55,21 +54,19 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
55 FIRE_RING(chan); 54 FIRE_RING(chan);
56} 55}
57 56
58static void 57void
59nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
60{ 59{
61 struct nouveau_fbcon_par *par = info->par; 60 struct nouveau_fbcon_par *par = info->par;
62 struct drm_device *dev = par->dev; 61 struct drm_device *dev = par->dev;
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 62 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan = dev_priv->channel; 63 struct nouveau_channel *chan = dev_priv->channel;
65 uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
66 64
67 if (info->state != FBINFO_STATE_RUNNING) 65 if (info->state != FBINFO_STATE_RUNNING)
68 return; 66 return;
69 67
70 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { 68 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
71 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 69 nouveau_fbcon_gpu_lockup(info);
72 info->flags |= FBINFO_HWACCEL_DISABLED;
73 } 70 }
74 71
75 if (info->flags & FBINFO_HWACCEL_DISABLED) { 72 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -80,14 +77,18 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
80 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 77 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
81 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); 78 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
82 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); 79 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
83 OUT_RING(chan, color); 80 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
81 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
82 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
83 else
84 OUT_RING(chan, rect->color);
84 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); 85 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
85 OUT_RING(chan, (rect->dx << 16) | rect->dy); 86 OUT_RING(chan, (rect->dx << 16) | rect->dy);
86 OUT_RING(chan, (rect->width << 16) | rect->height); 87 OUT_RING(chan, (rect->width << 16) | rect->height);
87 FIRE_RING(chan); 88 FIRE_RING(chan);
88} 89}
89 90
90static void 91void
91nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
92{ 93{
93 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbcon_par *par = info->par;
@@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
109 } 110 }
110 111
111 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { 112 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
112 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 113 nouveau_fbcon_gpu_lockup(info);
113 info->flags |= FBINFO_HWACCEL_DISABLED;
114 } 114 }
115 115
116 if (info->flags & FBINFO_HWACCEL_DISABLED) { 116 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
144 int iter_len = dsize > 128 ? 128 : dsize; 144 int iter_len = dsize > 128 ? 128 : dsize;
145 145
146 if (RING_SPACE(chan, iter_len + 1)) { 146 if (RING_SPACE(chan, iter_len + 1)) {
147 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 147 nouveau_fbcon_gpu_lockup(info);
148 info->flags |= FBINFO_HWACCEL_DISABLED;
149 cfb_imageblit(info, image); 148 cfb_imageblit(info, image);
150 return; 149 return;
151 } 150 }
@@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
184 struct drm_device *dev = par->dev; 183 struct drm_device *dev = par->dev;
185 struct drm_nouveau_private *dev_priv = dev->dev_private; 184 struct drm_nouveau_private *dev_priv = dev->dev_private;
186 struct nouveau_channel *chan = dev_priv->channel; 185 struct nouveau_channel *chan = dev_priv->channel;
186 const int sub = NvSubCtxSurf2D;
187 int surface_fmt, pattern_fmt, rect_fmt; 187 int surface_fmt, pattern_fmt, rect_fmt;
188 int ret; 188 int ret;
189 189
@@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info)
242 return ret; 242 return ret;
243 243
244 if (RING_SPACE(chan, 49)) { 244 if (RING_SPACE(chan, 49)) {
245 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 245 nouveau_fbcon_gpu_lockup(info);
246 info->flags |= FBINFO_HWACCEL_DISABLED;
247 return 0; 246 return 0;
248 } 247 }
249 248
250 BEGIN_RING(chan, 1, 0x0000, 1); 249 BEGIN_RING(chan, sub, 0x0000, 1);
251 OUT_RING(chan, NvCtxSurf2D); 250 OUT_RING(chan, NvCtxSurf2D);
252 BEGIN_RING(chan, 1, 0x0184, 2); 251 BEGIN_RING(chan, sub, 0x0184, 2);
253 OUT_RING(chan, NvDmaFB); 252 OUT_RING(chan, NvDmaFB);
254 OUT_RING(chan, NvDmaFB); 253 OUT_RING(chan, NvDmaFB);
255 BEGIN_RING(chan, 1, 0x0300, 4); 254 BEGIN_RING(chan, sub, 0x0300, 4);
256 OUT_RING(chan, surface_fmt); 255 OUT_RING(chan, surface_fmt);
257 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); 256 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
258 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 257 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
259 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 258 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
260 259
261 BEGIN_RING(chan, 1, 0x0000, 1); 260 BEGIN_RING(chan, sub, 0x0000, 1);
262 OUT_RING(chan, NvRop); 261 OUT_RING(chan, NvRop);
263 BEGIN_RING(chan, 1, 0x0300, 1); 262 BEGIN_RING(chan, sub, 0x0300, 1);
264 OUT_RING(chan, 0x55); 263 OUT_RING(chan, 0x55);
265 264
266 BEGIN_RING(chan, 1, 0x0000, 1); 265 BEGIN_RING(chan, sub, 0x0000, 1);
267 OUT_RING(chan, NvImagePatt); 266 OUT_RING(chan, NvImagePatt);
268 BEGIN_RING(chan, 1, 0x0300, 8); 267 BEGIN_RING(chan, sub, 0x0300, 8);
269 OUT_RING(chan, pattern_fmt); 268 OUT_RING(chan, pattern_fmt);
270#ifdef __BIG_ENDIAN 269#ifdef __BIG_ENDIAN
271 OUT_RING(chan, 2); 270 OUT_RING(chan, 2);
@@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
279 OUT_RING(chan, ~0); 278 OUT_RING(chan, ~0);
280 OUT_RING(chan, ~0); 279 OUT_RING(chan, ~0);
281 280
282 BEGIN_RING(chan, 1, 0x0000, 1); 281 BEGIN_RING(chan, sub, 0x0000, 1);
283 OUT_RING(chan, NvClipRect); 282 OUT_RING(chan, NvClipRect);
284 BEGIN_RING(chan, 1, 0x0300, 2); 283 BEGIN_RING(chan, sub, 0x0300, 2);
285 OUT_RING(chan, 0); 284 OUT_RING(chan, 0);
286 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); 285 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
287 286
@@ -308,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
308 307
309 FIRE_RING(chan); 308 FIRE_RING(chan);
310 309
311 info->fbops->fb_fillrect = nv04_fbcon_fillrect;
312 info->fbops->fb_copyarea = nv04_fbcon_copyarea;
313 info->fbops->fb_imageblit = nv04_fbcon_imageblit;
314 return 0; 310 return 0;
315} 311}
316 312
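
nv04_fbcon_fillrect() above now picks the fill colour by framebuffer visual: truecolor and directcolor go through the pseudo_palette lookup, while paletted visuals pass the colour index straight to the GDI rectangle object. The same selection as a standalone helper (fbcon_fill_color is a hypothetical name; the logic mirrors the hunk):

static uint32_t fbcon_fill_color(struct fb_info *info,
                                 const struct fb_fillrect *rect)
{
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR)
                return ((uint32_t *)info->pseudo_palette)[rect->color];

        return rect->color;
}
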
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 0c3cd53c7313..f31347b8c9b0 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
71 return (reassign == 1); 71 return (reassign == 1);
72} 72}
73 73
74bool
75nv04_fifo_cache_flush(struct drm_device *dev)
76{
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
79 uint64_t start = ptimer->read(dev);
80
81 do {
82 if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
83 nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
84 return true;
85
86 } while (ptimer->read(dev) - start < 100000000);
87
88 NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
89
90 return false;
91}
92
93bool
94nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
95{
96 uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
97
98 if (enable) {
99 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
100 } else {
101 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
102 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
103 }
104
105 return !!(pull & 1);
106}
107
74int 108int
75nv04_fifo_channel_id(struct drm_device *dev) 109nv04_fifo_channel_id(struct drm_device *dev)
76{ 110{
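
nv04_fifo_cache_flush() above waits for CACHE1 GET to catch up with PUT, bounded by a fixed budget (100000000 ns) read from PTIMER. The same poll-with-deadline idiom with the condition factored out (poll_until is a made-up helper, not part of the driver):

static bool poll_until(struct drm_device *dev,
                       bool (*cond)(struct drm_device *),
                       uint64_t timeout_ns)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
        uint64_t start = ptimer->read(dev);

        do {
                if (cond(dev))
                        return true;
        } while (ptimer->read(dev) - start < timeout_ns);

        return false;                   /* deadline expired */
}
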
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index d561d773c0f4..e260986ea65a 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -28,6 +28,10 @@
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29 29
30static uint32_t nv04_graph_ctx_regs[] = { 30static uint32_t nv04_graph_ctx_regs[] = {
31 0x0040053c,
32 0x00400544,
33 0x00400540,
34 0x00400548,
31 NV04_PGRAPH_CTX_SWITCH1, 35 NV04_PGRAPH_CTX_SWITCH1,
32 NV04_PGRAPH_CTX_SWITCH2, 36 NV04_PGRAPH_CTX_SWITCH2,
33 NV04_PGRAPH_CTX_SWITCH3, 37 NV04_PGRAPH_CTX_SWITCH3,
@@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = {
102 NV04_PGRAPH_PATT_COLOR0, 106 NV04_PGRAPH_PATT_COLOR0,
103 NV04_PGRAPH_PATT_COLOR1, 107 NV04_PGRAPH_PATT_COLOR1,
104 NV04_PGRAPH_PATT_COLORRAM+0x00, 108 NV04_PGRAPH_PATT_COLORRAM+0x00,
105 NV04_PGRAPH_PATT_COLORRAM+0x01,
106 NV04_PGRAPH_PATT_COLORRAM+0x02,
107 NV04_PGRAPH_PATT_COLORRAM+0x03,
108 NV04_PGRAPH_PATT_COLORRAM+0x04, 109 NV04_PGRAPH_PATT_COLORRAM+0x04,
109 NV04_PGRAPH_PATT_COLORRAM+0x05,
110 NV04_PGRAPH_PATT_COLORRAM+0x06,
111 NV04_PGRAPH_PATT_COLORRAM+0x07,
112 NV04_PGRAPH_PATT_COLORRAM+0x08, 110 NV04_PGRAPH_PATT_COLORRAM+0x08,
113 NV04_PGRAPH_PATT_COLORRAM+0x09, 111 NV04_PGRAPH_PATT_COLORRAM+0x0c,
114 NV04_PGRAPH_PATT_COLORRAM+0x0A,
115 NV04_PGRAPH_PATT_COLORRAM+0x0B,
116 NV04_PGRAPH_PATT_COLORRAM+0x0C,
117 NV04_PGRAPH_PATT_COLORRAM+0x0D,
118 NV04_PGRAPH_PATT_COLORRAM+0x0E,
119 NV04_PGRAPH_PATT_COLORRAM+0x0F,
120 NV04_PGRAPH_PATT_COLORRAM+0x10, 112 NV04_PGRAPH_PATT_COLORRAM+0x10,
121 NV04_PGRAPH_PATT_COLORRAM+0x11,
122 NV04_PGRAPH_PATT_COLORRAM+0x12,
123 NV04_PGRAPH_PATT_COLORRAM+0x13,
124 NV04_PGRAPH_PATT_COLORRAM+0x14, 113 NV04_PGRAPH_PATT_COLORRAM+0x14,
125 NV04_PGRAPH_PATT_COLORRAM+0x15,
126 NV04_PGRAPH_PATT_COLORRAM+0x16,
127 NV04_PGRAPH_PATT_COLORRAM+0x17,
128 NV04_PGRAPH_PATT_COLORRAM+0x18, 114 NV04_PGRAPH_PATT_COLORRAM+0x18,
129 NV04_PGRAPH_PATT_COLORRAM+0x19, 115 NV04_PGRAPH_PATT_COLORRAM+0x1c,
130 NV04_PGRAPH_PATT_COLORRAM+0x1A,
131 NV04_PGRAPH_PATT_COLORRAM+0x1B,
132 NV04_PGRAPH_PATT_COLORRAM+0x1C,
133 NV04_PGRAPH_PATT_COLORRAM+0x1D,
134 NV04_PGRAPH_PATT_COLORRAM+0x1E,
135 NV04_PGRAPH_PATT_COLORRAM+0x1F,
136 NV04_PGRAPH_PATT_COLORRAM+0x20, 116 NV04_PGRAPH_PATT_COLORRAM+0x20,
137 NV04_PGRAPH_PATT_COLORRAM+0x21,
138 NV04_PGRAPH_PATT_COLORRAM+0x22,
139 NV04_PGRAPH_PATT_COLORRAM+0x23,
140 NV04_PGRAPH_PATT_COLORRAM+0x24, 117 NV04_PGRAPH_PATT_COLORRAM+0x24,
141 NV04_PGRAPH_PATT_COLORRAM+0x25,
142 NV04_PGRAPH_PATT_COLORRAM+0x26,
143 NV04_PGRAPH_PATT_COLORRAM+0x27,
144 NV04_PGRAPH_PATT_COLORRAM+0x28, 118 NV04_PGRAPH_PATT_COLORRAM+0x28,
145 NV04_PGRAPH_PATT_COLORRAM+0x29, 119 NV04_PGRAPH_PATT_COLORRAM+0x2c,
146 NV04_PGRAPH_PATT_COLORRAM+0x2A,
147 NV04_PGRAPH_PATT_COLORRAM+0x2B,
148 NV04_PGRAPH_PATT_COLORRAM+0x2C,
149 NV04_PGRAPH_PATT_COLORRAM+0x2D,
150 NV04_PGRAPH_PATT_COLORRAM+0x2E,
151 NV04_PGRAPH_PATT_COLORRAM+0x2F,
152 NV04_PGRAPH_PATT_COLORRAM+0x30, 120 NV04_PGRAPH_PATT_COLORRAM+0x30,
153 NV04_PGRAPH_PATT_COLORRAM+0x31,
154 NV04_PGRAPH_PATT_COLORRAM+0x32,
155 NV04_PGRAPH_PATT_COLORRAM+0x33,
156 NV04_PGRAPH_PATT_COLORRAM+0x34, 121 NV04_PGRAPH_PATT_COLORRAM+0x34,
157 NV04_PGRAPH_PATT_COLORRAM+0x35,
158 NV04_PGRAPH_PATT_COLORRAM+0x36,
159 NV04_PGRAPH_PATT_COLORRAM+0x37,
160 NV04_PGRAPH_PATT_COLORRAM+0x38, 122 NV04_PGRAPH_PATT_COLORRAM+0x38,
161 NV04_PGRAPH_PATT_COLORRAM+0x39, 123 NV04_PGRAPH_PATT_COLORRAM+0x3c,
162 NV04_PGRAPH_PATT_COLORRAM+0x3A, 124 NV04_PGRAPH_PATT_COLORRAM+0x40,
163 NV04_PGRAPH_PATT_COLORRAM+0x3B, 125 NV04_PGRAPH_PATT_COLORRAM+0x44,
164 NV04_PGRAPH_PATT_COLORRAM+0x3C, 126 NV04_PGRAPH_PATT_COLORRAM+0x48,
165 NV04_PGRAPH_PATT_COLORRAM+0x3D, 127 NV04_PGRAPH_PATT_COLORRAM+0x4c,
166 NV04_PGRAPH_PATT_COLORRAM+0x3E, 128 NV04_PGRAPH_PATT_COLORRAM+0x50,
167 NV04_PGRAPH_PATT_COLORRAM+0x3F, 129 NV04_PGRAPH_PATT_COLORRAM+0x54,
130 NV04_PGRAPH_PATT_COLORRAM+0x58,
131 NV04_PGRAPH_PATT_COLORRAM+0x5c,
132 NV04_PGRAPH_PATT_COLORRAM+0x60,
133 NV04_PGRAPH_PATT_COLORRAM+0x64,
134 NV04_PGRAPH_PATT_COLORRAM+0x68,
135 NV04_PGRAPH_PATT_COLORRAM+0x6c,
136 NV04_PGRAPH_PATT_COLORRAM+0x70,
137 NV04_PGRAPH_PATT_COLORRAM+0x74,
138 NV04_PGRAPH_PATT_COLORRAM+0x78,
139 NV04_PGRAPH_PATT_COLORRAM+0x7c,
140 NV04_PGRAPH_PATT_COLORRAM+0x80,
141 NV04_PGRAPH_PATT_COLORRAM+0x84,
142 NV04_PGRAPH_PATT_COLORRAM+0x88,
143 NV04_PGRAPH_PATT_COLORRAM+0x8c,
144 NV04_PGRAPH_PATT_COLORRAM+0x90,
145 NV04_PGRAPH_PATT_COLORRAM+0x94,
146 NV04_PGRAPH_PATT_COLORRAM+0x98,
147 NV04_PGRAPH_PATT_COLORRAM+0x9c,
148 NV04_PGRAPH_PATT_COLORRAM+0xa0,
149 NV04_PGRAPH_PATT_COLORRAM+0xa4,
150 NV04_PGRAPH_PATT_COLORRAM+0xa8,
151 NV04_PGRAPH_PATT_COLORRAM+0xac,
152 NV04_PGRAPH_PATT_COLORRAM+0xb0,
153 NV04_PGRAPH_PATT_COLORRAM+0xb4,
154 NV04_PGRAPH_PATT_COLORRAM+0xb8,
155 NV04_PGRAPH_PATT_COLORRAM+0xbc,
156 NV04_PGRAPH_PATT_COLORRAM+0xc0,
157 NV04_PGRAPH_PATT_COLORRAM+0xc4,
158 NV04_PGRAPH_PATT_COLORRAM+0xc8,
159 NV04_PGRAPH_PATT_COLORRAM+0xcc,
160 NV04_PGRAPH_PATT_COLORRAM+0xd0,
161 NV04_PGRAPH_PATT_COLORRAM+0xd4,
162 NV04_PGRAPH_PATT_COLORRAM+0xd8,
163 NV04_PGRAPH_PATT_COLORRAM+0xdc,
164 NV04_PGRAPH_PATT_COLORRAM+0xe0,
165 NV04_PGRAPH_PATT_COLORRAM+0xe4,
166 NV04_PGRAPH_PATT_COLORRAM+0xe8,
167 NV04_PGRAPH_PATT_COLORRAM+0xec,
168 NV04_PGRAPH_PATT_COLORRAM+0xf0,
169 NV04_PGRAPH_PATT_COLORRAM+0xf4,
170 NV04_PGRAPH_PATT_COLORRAM+0xf8,
171 NV04_PGRAPH_PATT_COLORRAM+0xfc,
168 NV04_PGRAPH_PATTERN, 172 NV04_PGRAPH_PATTERN,
169 0x0040080c, 173 0x0040080c,
170 NV04_PGRAPH_PATTERN_SHAPE, 174 NV04_PGRAPH_PATTERN_SHAPE,
@@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = {
247 0x004004f8, 251 0x004004f8,
248 0x0040047c, 252 0x0040047c,
249 0x004004fc, 253 0x004004fc,
250 0x0040053c,
251 0x00400544,
252 0x00400540,
253 0x00400548,
254 0x00400560,
255 0x00400568,
256 0x00400564,
257 0x0040056c,
258 0x00400534, 254 0x00400534,
259 0x00400538, 255 0x00400538,
260 0x00400514, 256 0x00400514,
@@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = {
341 0x00400500, 337 0x00400500,
342 0x00400504, 338 0x00400504,
343 NV04_PGRAPH_VALID1, 339 NV04_PGRAPH_VALID1,
344 NV04_PGRAPH_VALID2 340 NV04_PGRAPH_VALID2,
345 341 NV04_PGRAPH_DEBUG_3
346
347}; 342};
348 343
349struct graph_state { 344struct graph_state {
@@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev)
388 pgraph->fifo_access(dev, true); 383 pgraph->fifo_access(dev, true);
389} 384}
390 385
386static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
387{
388 int i;
389
390 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
391 if (nv04_graph_ctx_regs[i] == reg)
392 return &ctx->nv04[i];
393 }
394
395 return NULL;
396}
397
391int nv04_graph_create_context(struct nouveau_channel *chan) 398int nv04_graph_create_context(struct nouveau_channel *chan)
392{ 399{
393 struct graph_state *pgraph_ctx; 400 struct graph_state *pgraph_ctx;
@@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
398 if (pgraph_ctx == NULL) 405 if (pgraph_ctx == NULL)
399 return -ENOMEM; 406 return -ENOMEM;
400 407
401 /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */ 408 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
402 pgraph_ctx->nv04[0] = 0x0001ffff; 409
403 /* is it really needed ??? */
404#if 0
405 dev_priv->fifos[channel].pgraph_ctx[1] =
406 nv_rd32(dev, NV_PGRAPH_DEBUG_4);
407 dev_priv->fifos[channel].pgraph_ctx[2] =
408 nv_rd32(dev, 0x004006b0);
409#endif
410 return 0; 410 return 0;
411} 411}
412 412
@@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
429 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); 429 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
430 430
431 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100); 431 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
432 nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24); 432
433 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
434 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
435
433 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2); 436 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
434 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff); 437 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
438
435 return 0; 439 return 0;
436} 440}
437 441
@@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev)
494 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); 498 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
495 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); 499 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
496 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; 500 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
497 tmp |= dev_priv->engine.fifo.channels << 24; 501 tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
498 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); 502 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
499 503
500 /* These don't belong here, they're part of a per-channel context */ 504 /* These don't belong here, they're part of a per-channel context */
@@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
533 int mthd, uint32_t data) 537 int mthd, uint32_t data)
534{ 538{
535 struct drm_device *dev = chan->dev; 539 struct drm_device *dev = chan->dev;
536 uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff; 540 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
537 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; 541 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
538 uint32_t tmp; 542 uint32_t tmp;
539 543
@@ -547,7 +551,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
547 return 0; 551 return 0;
548} 552}
549 553
550static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = { 554static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
551 { 0x0150, nv04_graph_mthd_set_ref }, 555 { 0x0150, nv04_graph_mthd_set_ref },
552 {} 556 {}
553}; 557};
@@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
558}; 562};
559 563
560struct nouveau_pgraph_object_class nv04_graph_grclass[] = { 564struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
561 { 0x0039, false, nv04_graph_mthds_m2mf }, 565 { 0x0039, false, NULL },
562 { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ 566 { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
563 { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ 567 { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
564 { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ 568 { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
@@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
574 { 0x0053, false, NULL }, /* surf3d */ 578 { 0x0053, false, NULL }, /* surf3d */
575 { 0x0054, false, NULL }, /* tex_tri */ 579 { 0x0054, false, NULL }, /* tex_tri */
576 { 0x0055, false, NULL }, /* multitex_tri */ 580 { 0x0055, false, NULL }, /* multitex_tri */
581 { 0x506e, true, nv04_graph_mthds_sw },
577 {} 582 {}
578}; 583};
579 584
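
Editorial note (not part of the patch): the nv04_graph.c hunks above replace a hard-coded array index with a ctx_reg() helper that linear-searches nv04_graph_ctx_regs[] for a register address and hands back a pointer to the matching slot in the per-channel save area, so NV04_PGRAPH_DEBUG_3 can be initialised by name. A minimal standalone sketch of that lookup pattern follows; the register list, struct name and values here are illustrative stand-ins, not the driver's real tables.

/* Userspace model of the ctx_reg() lookup added above; table contents
 * and the saved-state struct are stand-ins for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint32_t ctx_regs[] = { 0x00400500, 0x00400504, 0x00400540 };

struct graph_state_model {
	uint32_t saved[ARRAY_SIZE(ctx_regs)];	/* one slot per register above */
};

/* Return the save slot for a register address, or NULL if the register
 * is not part of the per-channel context. */
static uint32_t *ctx_reg_model(struct graph_state_model *ctx, uint32_t reg)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx_regs); i++) {
		if (ctx_regs[i] == reg)
			return &ctx->saved[i];
	}
	return NULL;
}

int main(void)
{
	struct graph_state_model ctx = { {0} };
	uint32_t *slot = ctx_reg_model(&ctx, 0x00400540);

	if (slot)
		*slot = 0xfad4ff31;	/* write by register name, not by index */
	printf("slot 2 = 0x%08x\n", ctx.saved[2]);
	return 0;
}
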
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206625a2..a3b9563a6f60 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
30 * of vram. For now, only reserve a small piece until we know 30 * of vram. For now, only reserve a small piece until we know
31 * more about what each chipset requires. 31 * more about what each chipset requires.
32 */ 32 */
33 switch (dev_priv->chipset & 0xf0) { 33 switch (dev_priv->chipset) {
34 case 0x40: 34 case 0x40:
35 case 0x47: 35 case 0x47:
36 case 0x49: 36 case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index 79e2d104d70a..cc5cda44e501 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,17 +3,37 @@
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6void
7nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
8 uint32_t size, uint32_t pitch)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1;
12
13 if (pitch) {
14 if (dev_priv->card_type >= NV_20)
15 addr |= 1;
16 else
17 addr |= 1 << 31;
18 }
19
20 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
21 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
22 nv_wr32(dev, NV10_PFB_TILE(i), addr);
23}
24
6int 25int
7nv10_fb_init(struct drm_device *dev) 26nv10_fb_init(struct drm_device *dev)
8{ 27{
9 uint32_t fb_bar_size; 28 struct drm_nouveau_private *dev_priv = dev->dev_private;
29 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
10 int i; 30 int i;
11 31
12 fb_bar_size = drm_get_resource_len(dev, 0) - 1; 32 pfb->num_tiles = NV10_PFB_TILE__SIZE;
13 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { 33
14 nv_wr32(dev, NV10_PFB_TILE(i), 0); 34 /* Turn all the tiling regions off. */
15 nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size); 35 for (i = 0; i < pfb->num_tiles; i++)
16 } 36 pfb->set_region_tiling(dev, i, 0, 0, 0);
17 37
18 return 0; 38 return 0;
19} 39}
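
Editorial note: the new set_region_tiling() helpers introduced here (and repeated for PGRAPH below) all compute the tile limit as max(1u, addr + size) - 1, an inclusive end address that stays well defined when the region is empty, and fold a "region enabled" flag into the address word when a pitch is given (bit 31 before NV20, bit 0 on NV20 and later, as the hunks show). A small standalone model of that arithmetic, with made-up addresses:

/* Standalone model of the tile-limit arithmetic used by the new
 * set_region_tiling() helpers; the constants are only examples. */
#include <stdint.h>
#include <stdio.h>

static uint32_t tile_limit(uint32_t addr, uint32_t size)
{
	uint32_t end = addr + size;

	/* max(1u, addr + size) - 1: inclusive end address that does not
	 * underflow when the region is empty (addr == size == 0). */
	if (end < 1)
		end = 1;
	return end - 1;
}

int main(void)
{
	uint32_t addr = 0x00100000, size = 0x00080000, pitch = 0x1000;
	uint32_t limit = tile_limit(addr, size);

	if (pitch)
		addr |= 1u << 31;	/* pre-NV20 enable flag; NV20+ uses bit 0 */

	printf("TILE   = 0x%08x\n", addr);
	printf("TSIZE  = 0x%08x\n", pitch);
	printf("TLIMIT = 0x%08x\n", limit);
	printf("disabled region: limit = 0x%08x\n", tile_limit(0, 0));
	return 0;
}
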
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 6870e0ee2e7e..fcf2cdd19493 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -807,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
807 chan->pgraph_ctx = NULL; 807 chan->pgraph_ctx = NULL;
808} 808}
809 809
810void
811nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
812 uint32_t size, uint32_t pitch)
813{
814 uint32_t limit = max(1u, addr + size) - 1;
815
816 if (pitch)
817 addr |= 1 << 31;
818
819 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
820 nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
821 nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
822}
823
810int nv10_graph_init(struct drm_device *dev) 824int nv10_graph_init(struct drm_device *dev)
811{ 825{
812 struct drm_nouveau_private *dev_priv = dev->dev_private; 826 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -838,17 +852,9 @@ int nv10_graph_init(struct drm_device *dev)
838 } else 852 } else
839 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); 853 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
840 854
841 /* copy tile info from PFB */ 855 /* Turn all the tiling regions off. */
842 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { 856 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
843 nv_wr32(dev, NV10_PGRAPH_TILE(i), 857 nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
844 nv_rd32(dev, NV10_PFB_TILE(i)));
845 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
846 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
847 nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
848 nv_rd32(dev, NV10_PFB_TSIZE(i)));
849 nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
850 nv_rd32(dev, NV10_PFB_TSTATUS(i)));
851 }
852 858
853 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000); 859 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
854 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000); 860 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 81c01353a9f9..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -33,13 +33,103 @@
33#include "nouveau_hw.h" 33#include "nouveau_hw.h"
34#include "nv17_tv.h" 34#include "nv17_tv.h"
35 35
36enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, 36static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
37 struct drm_connector *connector,
38 uint32_t pin_mask)
39{ 37{
38 struct drm_device *dev = encoder->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
41 uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
42 fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
43 uint32_t sample = 0;
44 int head;
45
46#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
47 testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
48 if (dev_priv->vbios->tvdactestval)
49 testval = dev_priv->vbios->tvdactestval;
50
51 dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
52 head = (dacclk & 0x100) >> 8;
53
54 /* Save the previous state. */
55 gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
56 gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
57 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
58 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
59 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
60 fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
61 test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
62 ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
63 ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
64 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
65
66 /* Prepare the DAC for load detection. */
67 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
68 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
69
70 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
71 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
72 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
73 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
74 NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
75 NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
76 NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
77 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
78 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
79
80 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
81
82 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
83 (dacclk & ~0xff) | 0x22);
84 msleep(1);
85 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
86 (dacclk & ~0xff) | 0x21);
87
88 NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
89 NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
90
91 /* Sample pin 0x4 (usually S-video luma). */
92 NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
93 msleep(20);
94 sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
95 & 0x4 << 28;
96
97 /* Sample the remaining pins. */
98 NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
99 msleep(20);
100 sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
101 & 0xa << 28;
102
103 /* Restore the previous state. */
104 NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
105 NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
106 NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
107 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
108 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
109 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
110 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
111 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
112 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
113 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
114 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
115
116 return sample;
117}
118
119static enum drm_connector_status
120nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
121{
122 struct drm_device *dev = encoder->dev;
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124 struct drm_mode_config *conf = &dev->mode_config;
40 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); 125 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
126 struct dcb_entry *dcb = tv_enc->base.dcb;
41 127
42 tv_enc->pin_mask = pin_mask >> 28 & 0xe; 128 if (dev_priv->chipset == 0x42 ||
129 dev_priv->chipset == 0x43)
130 tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
131 else
132 tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
43 133
44 switch (tv_enc->pin_mask) { 134 switch (tv_enc->pin_mask) {
45 case 0x2: 135 case 0x2:
@@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
50 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; 140 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
51 break; 141 break;
52 case 0xe: 142 case 0xe:
53 if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output) 143 if (dcb->tvconf.has_component_output)
54 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; 144 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
55 else 145 else
56 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; 146 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
@@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
61 } 151 }
62 152
63 drm_connector_property_set_value(connector, 153 drm_connector_property_set_value(connector,
64 encoder->dev->mode_config.tv_subconnector_property, 154 conf->tv_subconnector_property,
65 tv_enc->subconnector); 155 tv_enc->subconnector);
66 156
67 return tv_enc->subconnector ? connector_status_connected : 157 if (tv_enc->subconnector) {
68 connector_status_disconnected; 158 NV_INFO(dev, "Load detected on output %c\n",
159 '@' + ffs(dcb->or));
160 return connector_status_connected;
161 } else {
162 return connector_status_disconnected;
163 }
69} 164}
70 165
71static const struct { 166static const struct {
@@ -484,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
484 nouveau_encoder(encoder)->restore.output); 579 nouveau_encoder(encoder)->restore.output);
485 580
486 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); 581 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
582
583 nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
487} 584}
488 585
489static int nv17_tv_create_resources(struct drm_encoder *encoder, 586static int nv17_tv_create_resources(struct drm_encoder *encoder,
@@ -633,7 +730,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
633 .prepare = nv17_tv_prepare, 730 .prepare = nv17_tv_prepare,
634 .commit = nv17_tv_commit, 731 .commit = nv17_tv_commit,
635 .mode_set = nv17_tv_mode_set, 732 .mode_set = nv17_tv_mode_set,
636 .detect = nv17_dac_detect, 733 .detect = nv17_tv_detect,
637}; 734};
638 735
639static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { 736static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
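
Editorial note: nv42_tv_sample_load() above follows a strict save / drive-test-pattern / settle / sample / restore sequence, restoring the RAMDAC state in roughly the reverse order it was saved. The sketch below models that discipline with a plain array standing in for the register file; the register indices, values and sampled bits are invented for illustration only.

/* Sketch of the save/program/sample/restore discipline used by
 * nv42_tv_sample_load(), modelled on a fake register file. */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4] = { 0x11, 0x22, 0x33, 0x44 };	/* fake registers */

static uint32_t rd(int r)             { return regs[r]; }
static void     wr(int r, uint32_t v) { regs[r] = v; }

static uint32_t sample_load(void)
{
	uint32_t saved[4];
	uint32_t sample;
	int i;

	for (i = 0; i < 4; i++)		/* 1. save the previous state */
		saved[i] = rd(i);

	wr(0, 0x82);			/* 2. drive a known test level */
	wr(1, 0xeb);
	/* the driver msleep()s here so the load can settle */

	sample = rd(2) & 0xe;		/* 3. read back the detection bits */

	for (i = 3; i >= 0; i--)	/* 4. restore in reverse order */
		wr(i, saved[i]);

	return sample;
}

int main(void)
{
	printf("pin mask 0x%x\n", sample_load());
	return 0;
}
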
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 18ba74f19703..d6fc0a82f03d 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev)
514 nouveau_wait_for_idle(dev); 514 nouveau_wait_for_idle(dev);
515} 515}
516 516
517void
518nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
519 uint32_t size, uint32_t pitch)
520{
521 uint32_t limit = max(1u, addr + size) - 1;
522
523 if (pitch)
524 addr |= 1;
525
526 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
527 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
528 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
529
530 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
531 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
532 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
533 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
534 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
535 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
536}
537
517int 538int
518nv20_graph_init(struct drm_device *dev) 539nv20_graph_init(struct drm_device *dev)
519{ 540{
@@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev)
572 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030); 593 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
573 } 594 }
574 595
575 /* copy tile info from PFB */ 596 /* Turn all the tiling regions off. */
576 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { 597 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
577 nv_wr32(dev, 0x00400904 + i * 0x10, 598 nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
578 nv_rd32(dev, NV10_PFB_TLIMIT(i))); 599
579 /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
580 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
581 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
582 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
583 nv_wr32(dev, 0x00400908 + i * 0x10,
584 nv_rd32(dev, NV10_PFB_TSIZE(i)));
585 /* which is NV40_PGRAPH_TSIZE0(i) ?? */
586 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
587 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
588 nv_rd32(dev, NV10_PFB_TSIZE(i)));
589 nv_wr32(dev, 0x00400900 + i * 0x10,
590 nv_rd32(dev, NV10_PFB_TILE(i)));
591 /* which is NV40_PGRAPH_TILE0(i) ?? */
592 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
593 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
594 nv_rd32(dev, NV10_PFB_TILE(i)));
595 }
596 for (i = 0; i < 8; i++) { 600 for (i = 0; i < 8; i++) {
597 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); 601 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
598 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); 602 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
@@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev)
704 708
705 nv_wr32(dev, 0x4000c0, 0x00000016); 709 nv_wr32(dev, 0x4000c0, 0x00000016);
706 710
707 /* copy tile info from PFB */ 711 /* Turn all the tiling regions off. */
708 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { 712 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
709 nv_wr32(dev, 0x00400904 + i * 0x10, 713 nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
710 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
711 /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
712 nv_wr32(dev, 0x00400908 + i * 0x10,
713 nv_rd32(dev, NV10_PFB_TSIZE(i)));
714 /* which is NV40_PGRAPH_TSIZE0(i) ?? */
715 nv_wr32(dev, 0x00400900 + i * 0x10,
716 nv_rd32(dev, NV10_PFB_TILE(i)));
717 /* which is NV40_PGRAPH_TILE0(i) ?? */
718 }
719 714
720 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 715 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
721 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); 716 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index ca1d27107a8e..3cd07d8d5bd7 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -3,12 +3,37 @@
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6void
7nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
8 uint32_t size, uint32_t pitch)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1;
12
13 if (pitch)
14 addr |= 1;
15
16 switch (dev_priv->chipset) {
17 case 0x40:
18 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
19 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
20 nv_wr32(dev, NV10_PFB_TILE(i), addr);
21 break;
22
23 default:
24 nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
25 nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
26 nv_wr32(dev, NV40_PFB_TILE(i), addr);
27 break;
28 }
29}
30
6int 31int
7nv40_fb_init(struct drm_device *dev) 32nv40_fb_init(struct drm_device *dev)
8{ 33{
9 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 uint32_t fb_bar_size, tmp; 35 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
11 int num_tiles; 36 uint32_t tmp;
12 int i; 37 int i;
13 38
14 /* This is strictly a NV4x register (don't know about NV5x). */ 39 /* This is strictly a NV4x register (don't know about NV5x). */
@@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev)
23 case 0x45: 48 case 0x45:
24 tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2); 49 tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
25 nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15)); 50 nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
26 num_tiles = NV10_PFB_TILE__SIZE; 51 pfb->num_tiles = NV10_PFB_TILE__SIZE;
27 break; 52 break;
28 case 0x46: /* G72 */ 53 case 0x46: /* G72 */
29 case 0x47: /* G70 */ 54 case 0x47: /* G70 */
30 case 0x49: /* G71 */ 55 case 0x49: /* G71 */
31 case 0x4b: /* G73 */ 56 case 0x4b: /* G73 */
32 case 0x4c: /* C51 (G7X version) */ 57 case 0x4c: /* C51 (G7X version) */
33 num_tiles = NV40_PFB_TILE__SIZE_1; 58 pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
34 break; 59 break;
35 default: 60 default:
36 num_tiles = NV40_PFB_TILE__SIZE_0; 61 pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
37 break; 62 break;
38 } 63 }
39 64
40 fb_bar_size = drm_get_resource_len(dev, 0) - 1; 65 /* Turn all the tiling regions off. */
41 switch (dev_priv->chipset) { 66 for (i = 0; i < pfb->num_tiles; i++)
42 case 0x40: 67 pfb->set_region_tiling(dev, i, 0, 0, 0);
43 for (i = 0; i < num_tiles; i++) {
44 nv_wr32(dev, NV10_PFB_TILE(i), 0);
45 nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
46 }
47 break;
48 default:
49 for (i = 0; i < num_tiles; i++) {
50 nv_wr32(dev, NV40_PFB_TILE(i), 0);
51 nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
52 }
53 break;
54 }
55 68
56 return 0; 69 return 0;
57} 70}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 2b332bb55acf..53e8afe1dcd1 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -181,6 +181,48 @@ nv40_graph_unload_context(struct drm_device *dev)
181 return ret; 181 return ret;
182} 182}
183 183
184void
185nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
186 uint32_t size, uint32_t pitch)
187{
188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 uint32_t limit = max(1u, addr + size) - 1;
190
191 if (pitch)
192 addr |= 1;
193
194 switch (dev_priv->chipset) {
195 case 0x44:
196 case 0x4a:
197 case 0x4e:
198 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
199 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
200 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
201 break;
202
203 case 0x46:
204 case 0x47:
205 case 0x49:
206 case 0x4b:
207 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
208 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
209 nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
210 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
211 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
212 nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
213 break;
214
215 default:
216 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
217 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
218 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
219 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
220 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
221 nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
222 break;
223 }
224}
225
184/* 226/*
185 * G70 0x47 227 * G70 0x47
186 * G71 0x49 228 * G71 0x49
@@ -195,7 +237,8 @@ nv40_graph_init(struct drm_device *dev)
195{ 237{
196 struct drm_nouveau_private *dev_priv = 238 struct drm_nouveau_private *dev_priv =
197 (struct drm_nouveau_private *)dev->dev_private; 239 (struct drm_nouveau_private *)dev->dev_private;
198 uint32_t vramsz, tmp; 240 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
241 uint32_t vramsz;
199 int i, j; 242 int i, j;
200 243
201 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 244 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -292,74 +335,9 @@ nv40_graph_init(struct drm_device *dev)
292 nv_wr32(dev, 0x400b38, 0x2ffff800); 335 nv_wr32(dev, 0x400b38, 0x2ffff800);
293 nv_wr32(dev, 0x400b3c, 0x00006000); 336 nv_wr32(dev, 0x400b3c, 0x00006000);
294 337
295 /* copy tile info from PFB */ 338 /* Turn all the tiling regions off. */
296 switch (dev_priv->chipset) { 339 for (i = 0; i < pfb->num_tiles; i++)
297 case 0x40: /* vanilla NV40 */ 340 nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
298 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
299 tmp = nv_rd32(dev, NV10_PFB_TILE(i));
300 nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
301 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
302 tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
303 nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
304 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
305 tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
306 nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
307 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
308 tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
309 nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
310 nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
311 }
312 break;
313 case 0x44:
314 case 0x4a:
315 case 0x4e: /* NV44-based cores don't have 0x406900? */
316 for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
317 tmp = nv_rd32(dev, NV40_PFB_TILE(i));
318 nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
319 tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
320 nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
321 tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
322 nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
323 tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
324 nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
325 }
326 break;
327 case 0x46:
328 case 0x47:
329 case 0x49:
330 case 0x4b: /* G7X-based cores */
331 for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
332 tmp = nv_rd32(dev, NV40_PFB_TILE(i));
333 nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
334 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
335 tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
336 nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
337 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
338 tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
339 nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
340 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
341 tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
342 nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
343 nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
344 }
345 break;
346 default: /* everything else */
347 for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
348 tmp = nv_rd32(dev, NV40_PFB_TILE(i));
349 nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
350 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
351 tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
352 nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
353 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
354 tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
355 nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
356 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
357 tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
358 nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
359 nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
360 }
361 break;
362 }
363 341
364 /* begin RAM config */ 342 /* begin RAM config */
365 vramsz = drm_get_resource_len(dev, 0) - 1; 343 vramsz = drm_get_resource_len(dev, 0) - 1;
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 118d3285fd8c..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
298static void 298static void
299nv50_crtc_destroy(struct drm_crtc *crtc) 299nv50_crtc_destroy(struct drm_crtc *crtc)
300{ 300{
301 struct drm_device *dev = crtc->dev; 301 struct drm_device *dev;
302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 302 struct nouveau_crtc *nv_crtc;
303
304 NV_DEBUG_KMS(dev, "\n");
305 303
306 if (!crtc) 304 if (!crtc)
307 return; 305 return;
308 306
307 dev = crtc->dev;
308 nv_crtc = nouveau_crtc(crtc);
309
310 NV_DEBUG_KMS(dev, "\n");
311
309 drm_crtc_cleanup(&nv_crtc->base); 312 drm_crtc_cleanup(&nv_crtc->base);
310 313
311 nv50_cursor_fini(nv_crtc); 314 nv50_cursor_fini(nv_crtc);
@@ -432,6 +435,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 435 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433 struct drm_device *dev = crtc->dev; 436 struct drm_device *dev = crtc->dev;
434 struct drm_encoder *encoder; 437 struct drm_encoder *encoder;
438 uint32_t dac = 0, sor = 0;
435 439
436 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 440 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
437 441
@@ -439,9 +443,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 443 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
440 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
441 445
442 if (drm_helper_encoder_in_use(encoder)) 446 if (!drm_helper_encoder_in_use(encoder))
443 continue; 447 continue;
444 448
449 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
450 nv_encoder->dcb->type == OUTPUT_TV)
451 dac |= (1 << nv_encoder->or);
452 else
453 sor |= (1 << nv_encoder->or);
454 }
455
456 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
457 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
458
459 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
460 nv_encoder->dcb->type == OUTPUT_TV) {
461 if (dac & (1 << nv_encoder->or))
462 continue;
463 } else {
464 if (sor & (1 << nv_encoder->or))
465 continue;
466 }
467
445 nv_encoder->disconnect(nv_encoder); 468 nv_encoder->disconnect(nv_encoder);
446 } 469 }
447 470
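
Editorial note: the reworked nv50_crtc_prepare() above makes two passes over the encoder list. The first pass builds DAC and SOR bitmasks of output resources still driven by an in-use encoder; the second pass disconnects only encoders whose OR is absent from the relevant mask, so an idle encoder that shares an OR with an active one is left untouched. A self-contained model of that two-pass pattern, with a made-up encoder table:

/* Two-pass "collect in-use resources, then disconnect the rest" pattern
 * from nv50_crtc_prepare(); encoders and types here are invented. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum out_type { OUT_ANALOG, OUT_TV, OUT_TMDS };

struct enc {
	enum out_type type;
	int           or_idx;	/* output resource index */
	bool          in_use;
};

int main(void)
{
	struct enc encs[] = {
		{ OUT_ANALOG, 0, true  },
		{ OUT_TV,     0, false },	/* shares OR 0 with the active DAC */
		{ OUT_TMDS,   1, false },
	};
	uint32_t dac = 0, sor = 0;
	size_t i;

	/* Pass 1: record which ORs are driven by an in-use encoder. */
	for (i = 0; i < sizeof(encs) / sizeof(encs[0]); i++) {
		if (!encs[i].in_use)
			continue;
		if (encs[i].type == OUT_ANALOG || encs[i].type == OUT_TV)
			dac |= 1u << encs[i].or_idx;
		else
			sor |= 1u << encs[i].or_idx;
	}

	/* Pass 2: disconnect only encoders whose OR is completely idle. */
	for (i = 0; i < sizeof(encs) / sizeof(encs[0]); i++) {
		uint32_t mask = (encs[i].type == OUT_ANALOG ||
				 encs[i].type == OUT_TV) ? dac : sor;

		if (mask & (1u << encs[i].or_idx))
			continue;
		printf("disconnect encoder %zu (or %d)\n", i, encs[i].or_idx);
	}
	return 0;
}
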
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a9263d92a231..90f0bf59fbcd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
690 int pxclk) 690 int pxclk)
691{ 691{
692 struct drm_nouveau_private *dev_priv = dev->dev_private; 692 struct drm_nouveau_private *dev_priv = dev->dev_private;
693 struct nouveau_connector *nv_connector = NULL;
694 struct drm_encoder *encoder;
693 struct nvbios *bios = &dev_priv->VBIOS; 695 struct nvbios *bios = &dev_priv->VBIOS;
694 uint32_t mc, script = 0, or; 696 uint32_t mc, script = 0, or;
695 697
698 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
699 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
700
701 if (nv_encoder->dcb != dcbent)
702 continue;
703
704 nv_connector = nouveau_encoder_connector_get(nv_encoder);
705 break;
706 }
707
696 or = ffs(dcbent->or) - 1; 708 or = ffs(dcbent->or) - 1;
697 mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or); 709 mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
698 switch (dcbent->type) { 710 switch (dcbent->type) {
@@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
711 } else 723 } else
712 if (bios->fp.strapless_is_24bit & 1) 724 if (bios->fp.strapless_is_24bit & 1)
713 script |= 0x0200; 725 script |= 0x0200;
726
727 if (nv_connector && nv_connector->edid &&
728 (nv_connector->edid->revision >= 4) &&
729 (nv_connector->edid->input & 0x70) >= 0x20)
730 script |= 0x0200;
714 } 731 }
715 732
716 if (nouveau_uscript_lvds >= 0) { 733 if (nouveau_uscript_lvds >= 0) {
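
Editorial note: the nv50_display.c change above selects the 24-bit LVDS script (flag 0x0200) when the connector's EDID is revision 4 or newer and (input & 0x70) >= 0x20. Reading that test against the EDID 1.4 layout, bits 6:4 of the video-input byte encode the colour depth and 0x20 corresponds to 8 bits per component, so the check means "panel advertises at least 8 bpc"; that interpretation is background context, not spelled out in the patch. A hedged sketch of the check, with a stand-in struct:

/* Model of the EDID-based 24-bit LVDS check added above. The bit-depth
 * reading (bits 6:4 of the video input byte, 0x20 == 8 bpc) follows the
 * EDID 1.4 encoding; the struct here is a simplified stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct edid_model {
	uint8_t revision;	/* EDID minor revision */
	uint8_t input;		/* video input definition byte */
};

static bool lvds_wants_24bit(const struct edid_model *edid)
{
	return edid &&
	       edid->revision >= 4 &&
	       (edid->input & 0x70) >= 0x20;	/* at least 8 bpc */
}

int main(void)
{
	struct edid_model panel = { .revision = 4, .input = 0xa5 };
	uint32_t script = 0;

	if (lvds_wants_24bit(&panel))
		script |= 0x0200;	/* 24-bit script flag from the hunk above */

	printf("script = 0x%04x\n", script);
	return 0;
}
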
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bcc6d39e9b0..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
3#include "nouveau_dma.h" 3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h" 4#include "nouveau_fbcon.h"
5 5
6static void 6void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{ 8{
9 struct nouveau_fbcon_par *par = info->par; 9 struct nouveau_fbcon_par *par = info->par;
@@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
16 16
17 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && 17 if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
18 RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { 18 RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
19 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 19 nouveau_fbcon_gpu_lockup(info);
20
21 info->flags |= FBINFO_HWACCEL_DISABLED;
22 } 20 }
23 21
24 if (info->flags & FBINFO_HWACCEL_DISABLED) { 22 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
31 OUT_RING(chan, 1); 29 OUT_RING(chan, 1);
32 } 30 }
33 BEGIN_RING(chan, NvSub2D, 0x0588, 1); 31 BEGIN_RING(chan, NvSub2D, 0x0588, 1);
34 OUT_RING(chan, rect->color); 32 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
33 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
34 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
35 else
36 OUT_RING(chan, rect->color);
35 BEGIN_RING(chan, NvSub2D, 0x0600, 4); 37 BEGIN_RING(chan, NvSub2D, 0x0600, 4);
36 OUT_RING(chan, rect->dx); 38 OUT_RING(chan, rect->dx);
37 OUT_RING(chan, rect->dy); 39 OUT_RING(chan, rect->dy);
@@ -44,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
44 FIRE_RING(chan); 46 FIRE_RING(chan);
45} 47}
46 48
47static void 49void
48nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
49{ 51{
50 struct nouveau_fbcon_par *par = info->par; 52 struct nouveau_fbcon_par *par = info->par;
@@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
56 return; 58 return;
57 59
58 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { 60 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
59 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 61 nouveau_fbcon_gpu_lockup(info);
60
61 info->flags |= FBINFO_HWACCEL_DISABLED;
62 } 62 }
63 63
64 if (info->flags & FBINFO_HWACCEL_DISABLED) { 64 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
81 FIRE_RING(chan); 81 FIRE_RING(chan);
82} 82}
83 83
84static void 84void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{ 86{
87 struct nouveau_fbcon_par *par = info->par; 87 struct nouveau_fbcon_par *par = info->par;
@@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
101 } 101 }
102 102
103 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { 103 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
104 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 104 nouveau_fbcon_gpu_lockup(info);
105 info->flags |= FBINFO_HWACCEL_DISABLED;
106 } 105 }
107 106
108 if (info->flags & FBINFO_HWACCEL_DISABLED) { 107 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
135 int push = dwords > 2047 ? 2047 : dwords; 134 int push = dwords > 2047 ? 2047 : dwords;
136 135
137 if (RING_SPACE(chan, push + 1)) { 136 if (RING_SPACE(chan, push + 1)) {
138 NV_ERROR(dev, 137 nouveau_fbcon_gpu_lockup(info);
139 "GPU lockup - switching to software fbcon\n");
140 info->flags |= FBINFO_HWACCEL_DISABLED;
141 cfb_imageblit(info, image); 138 cfb_imageblit(info, image);
142 return; 139 return;
143 } 140 }
@@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
199 196
200 ret = RING_SPACE(chan, 59); 197 ret = RING_SPACE(chan, 59);
201 if (ret) { 198 if (ret) {
202 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 199 nouveau_fbcon_gpu_lockup(info);
203 return ret; 200 return ret;
204 } 201 }
205 202
@@ -265,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
265 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
266 dev_priv->vm_vram_base); 263 dev_priv->vm_vram_base);
267 264
268 info->fbops->fb_fillrect = nv50_fbcon_fillrect;
269 info->fbops->fb_copyarea = nv50_fbcon_copyarea;
270 info->fbops->fb_imageblit = nv50_fbcon_imageblit;
271 return 0; 265 return 0;
272} 266}
273 267
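
Editorial note: besides routing the lockup handling through nouveau_fbcon_gpu_lockup(), the fillrect hunk above fixes the fill colour for truecolor and directcolor visuals, where the fb layer hands the driver a palette index in rect->color and the real pixel value has to come from info->pseudo_palette; other visuals already carry the raw value. A small model of that lookup, with simplified types:

/* Model of the fillrect colour lookup added above: truecolor and
 * directcolor visuals pass a palette index, so the pixel value comes
 * from the pseudo palette. Types and values are stand-ins. */
#include <stdint.h>
#include <stdio.h>

enum visual { VISUAL_TRUECOLOR, VISUAL_DIRECTCOLOR, VISUAL_PSEUDOCOLOR };

static uint32_t fill_value(enum visual v, uint32_t color,
			   const uint32_t *pseudo_palette)
{
	if (v == VISUAL_TRUECOLOR || v == VISUAL_DIRECTCOLOR)
		return pseudo_palette[color];	/* color is an index here */
	return color;				/* already a raw pixel value */
}

int main(void)
{
	uint32_t palette[16] = { [1] = 0x00ff0000, [7] = 0x00ffffff };

	printf("truecolor idx 7 -> 0x%08x\n",
	       fill_value(VISUAL_TRUECOLOR, 7, palette));
	printf("pseudocolor 7   -> 0x%08x\n",
	       fill_value(VISUAL_PSEUDOCOLOR, 7, palette));
	return 0;
}
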
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index b7282284f080..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
272 return ret; 272 return ret;
273 ramfc = chan->ramfc->gpuobj; 273 ramfc = chan->ramfc->gpuobj;
274 274
275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256, 275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
276 0, &chan->cache); 276 0, &chan->cache);
277 if (ret) 277 if (ret)
278 return ret; 278 return ret;
@@ -317,17 +317,20 @@ void
317nv50_fifo_destroy_context(struct nouveau_channel *chan) 317nv50_fifo_destroy_context(struct nouveau_channel *chan)
318{ 318{
319 struct drm_device *dev = chan->dev; 319 struct drm_device *dev = chan->dev;
320 struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
320 321
321 NV_DEBUG(dev, "ch%d\n", chan->id); 322 NV_DEBUG(dev, "ch%d\n", chan->id);
322 323
323 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 324 /* This will ensure the channel is seen as disabled. */
324 nouveau_gpuobj_ref_del(dev, &chan->cache); 325 chan->ramfc = NULL;
325
326 nv50_fifo_channel_disable(dev, chan->id, false); 326 nv50_fifo_channel_disable(dev, chan->id, false);
327 327
328 /* Dummy channel, also used on ch 127 */ 328 /* Dummy channel, also used on ch 127 */
329 if (chan->id == 0) 329 if (chan->id == 0)
330 nv50_fifo_channel_disable(dev, 127, false); 330 nv50_fifo_channel_disable(dev, 127, false);
331
332 nouveau_gpuobj_ref_del(dev, &ramfc);
333 nouveau_gpuobj_ref_del(dev, &chan->cache);
331} 334}
332 335
333int 336int
@@ -384,8 +387,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
384 nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr), 387 nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
385 nv_ro32(dev, cache, (ptr * 2) + 1)); 388 nv_ro32(dev, cache, (ptr * 2) + 1));
386 } 389 }
387 nv_wr32(dev, 0x3210, cnt << 2); 390 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
388 nv_wr32(dev, 0x3270, 0); 391 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
389 392
390 /* guessing that all the 0x34xx regs aren't on NV50 */ 393 /* guessing that all the 0x34xx regs aren't on NV50 */
391 if (!IS_G80) { 394 if (!IS_G80) {
@@ -398,8 +401,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
398 401
399 dev_priv->engine.instmem.finish_access(dev); 402 dev_priv->engine.instmem.finish_access(dev);
400 403
401 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
402 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
403 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); 404 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
404 return 0; 405 return 0;
405} 406}
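
Editorial note: the nv50_fifo_destroy_context() change above reorders teardown: chan->ramfc is cleared first so the channel is seen as disabled while it is being shut down, and the gpuobj references are only dropped afterwards through a saved local pointer. A sketch of that ordering with invented structures:

/* Sketch of the teardown ordering from nv50_fifo_destroy_context():
 * publish "channel has no RAMFC" before disabling it, then free the
 * object through a saved pointer. Structures are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct gpuobj_ref { int dummy; };

struct channel {
	struct gpuobj_ref *ramfc;
	struct gpuobj_ref *cache;
};

static void channel_disable(struct channel *chan)
{
	/* Anything running concurrently treats a NULL ramfc as "channel
	 * not present", so the disable path sees it as already gone. */
	printf("disable: ramfc %s\n", chan->ramfc ? "still set" : "cleared");
}

int main(void)
{
	struct channel chan = {
		.ramfc = malloc(sizeof(struct gpuobj_ref)),
		.cache = malloc(sizeof(struct gpuobj_ref)),
	};
	struct gpuobj_ref *ramfc = chan.ramfc;	/* keep a handle for the free */

	chan.ramfc = NULL;		/* 1. mark the channel as gone */
	channel_disable(&chan);		/* 2. disable while it looks dead */
	free(ramfc);			/* 3. now drop the saved reference */
	free(chan.cache);
	return 0;
}
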
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ca79f32be44c..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
84 nv_wr32(dev, 0x400804, 0xc0000000); 84 nv_wr32(dev, 0x400804, 0xc0000000);
85 nv_wr32(dev, 0x406800, 0xc0000000); 85 nv_wr32(dev, 0x406800, 0xc0000000);
86 nv_wr32(dev, 0x400c04, 0xc0000000); 86 nv_wr32(dev, 0x400c04, 0xc0000000);
87 nv_wr32(dev, 0x401804, 0xc0000000); 87 nv_wr32(dev, 0x401800, 0xc0000000);
88 nv_wr32(dev, 0x405018, 0xc0000000); 88 nv_wr32(dev, 0x405018, 0xc0000000);
89 nv_wr32(dev, 0x402000, 0xc0000000); 89 nv_wr32(dev, 0x402000, 0xc0000000);
90 90
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
165 uint32_t inst; 165 uint32_t inst;
166 int i; 166 int i;
167 167
168 /* Be sure we're not in the middle of a context switch or bad things
169 * will happen, such as unloading the wrong pgraph context.
170 */
171 if (!nv_wait(0x400300, 0x00000001, 0x00000000))
172 NV_ERROR(dev, "Ctxprog is still running\n");
173
168 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 174 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
169 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 175 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
170 return NULL; 176 return NULL;
@@ -275,19 +281,18 @@ nv50_graph_load_context(struct nouveau_channel *chan)
275int 281int
276nv50_graph_unload_context(struct drm_device *dev) 282nv50_graph_unload_context(struct drm_device *dev)
277{ 283{
278 uint32_t inst, fifo = nv_rd32(dev, 0x400500); 284 uint32_t inst;
279 285
280 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 286 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
281 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 287 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
282 return 0; 288 return 0;
283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 289 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284 290
285 nv_wr32(dev, 0x400500, fifo & ~1); 291 nouveau_wait_for_idle(dev);
286 nv_wr32(dev, 0x400784, inst); 292 nv_wr32(dev, 0x400784, inst);
287 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 293 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
288 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 294 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
289 nouveau_wait_for_idle(dev); 295 nouveau_wait_for_idle(dev);
290 nv_wr32(dev, 0x400500, fifo);
291 296
292 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 297 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
293 return 0; 298 return 0;
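
Editorial note: nv50_graph_channel() now polls register 0x400300 until bit 0 clears before trusting CTXCTL_CUR, so it cannot race a running ctxprog and report (and later unload) the wrong context. The standalone model below shows the bounded-poll shape of that nv_wait()-style check; the fake register, poll count and timeout are invented for the example.

/* Standalone model of the bounded poll used above to make sure the
 * ctxprog is idle before sampling the current context pointer. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reg_400300 = 0x1;	/* bit 0 set: ctxprog still running */

static uint32_t rd32(void)
{
	/* Pretend the ctxprog finishes after a few polls. */
	static int polls;

	if (++polls >= 3)
		reg_400300 &= ~0x1u;
	return reg_400300;
}

/* Return true once (reg & mask) == val, false after too many tries. */
static bool wait_mask(uint32_t mask, uint32_t val, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		if ((rd32() & mask) == val)
			return true;
	}
	return false;
}

int main(void)
{
	if (!wait_mask(0x00000001, 0x00000000, 100))
		fprintf(stderr, "Ctxprog is still running\n");
	else
		printf("ctxprog idle, safe to read CTXCTL_CUR\n");
	return 0;
}
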
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index e395c16d30f5..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -90,11 +90,25 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
90{ 90{
91 struct drm_device *dev = encoder->dev; 91 struct drm_device *dev = encoder->dev;
92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
93 struct drm_encoder *enc;
93 uint32_t val; 94 uint32_t val;
94 int or = nv_encoder->or; 95 int or = nv_encoder->or;
95 96
96 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); 97 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
97 98
99 nv_encoder->last_dpms = mode;
100 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
101 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102
103 if (nvenc == nv_encoder ||
104 nvenc->disconnect != nv50_sor_disconnect ||
105 nvenc->dcb->or != nv_encoder->dcb->or)
106 continue;
107
108 if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
109 return;
110 }
111
98 /* wait for it to be done */ 112 /* wait for it to be done */
99 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), 113 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
100 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { 114 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
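
Editorial note: the nv50_sor_dpms() hunk above records the requested mode in last_dpms and then scans the other SOR encoders; if any encoder wired to the same OR was last set to DPMS on, the function returns early instead of touching the shared output hardware. A compact model of that shared-resource guard, with a made-up encoder table:

/* Model of the shared-OR guard added to nv50_sor_dpms(): record our own
 * state, then bail out if another encoder on the same OR is still on. */
#include <stdio.h>

#define DPMS_ON  0
#define DPMS_OFF 3

struct enc { int or_idx; int last_dpms; };

static struct enc encs[] = {
	{ .or_idx = 1, .last_dpms = DPMS_ON },	/* "self" in the first call */
	{ .or_idx = 1, .last_dpms = DPMS_ON },	/* shares OR 1, still on */
	{ .or_idx = 2, .last_dpms = DPMS_OFF },
};

static void sor_dpms(struct enc *self, int mode)
{
	size_t i;

	self->last_dpms = mode;
	for (i = 0; i < sizeof(encs) / sizeof(encs[0]); i++) {
		struct enc *other = &encs[i];

		if (other == self || other->or_idx != self->or_idx)
			continue;
		if (other->last_dpms == DPMS_ON) {
			printf("OR %d still in use, leaving hardware alone\n",
			       self->or_idx);
			return;
		}
	}
	printf("programming OR %d to mode %d\n", self->or_idx, mode);
}

int main(void)
{
	sor_dpms(&encs[0], DPMS_OFF);	/* skipped: encs[1] keeps OR 1 on */
	sor_dpms(&encs[2], DPMS_OFF);	/* OR 2 idle, gets programmed */
	return 0;
}
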
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default" 2 bool "Enable modesetting on radeon by default - NEW DRIVER"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 help 4 help
5 Choose this option if you want kernel modesetting enabled by default, 5 Choose this option if you want kernel modesetting enabled by default.
6 and you have a new enough userspace to support this. Running old 6
7 userspaces with this enabled will cause pain. 7 This is a completely new driver. It's only part of the existing drm
8 for compatibility reasons. It requires an entirely different graphics
9 stack above it and works very differently from the old drm stack.
 10 i.e. don't enable this unless you know what you are doing; it may
11 cause issues or bugs compared to the previous userspace driver stack.
8 12
9 When kernel modesetting is enabled the IOCTL of radeon/drm 13 When kernel modesetting is enabled the IOCTL of radeon/drm
10 driver are considered as invalid and an error message is printed 14 driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5f5fe75e6af..1cc7b937b1ea 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
24$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable 24$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
25 $(call if_changed,mkregtable) 25 $(call if_changed,mkregtable)
26 26
27$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
28 $(call if_changed,mkregtable)
29
27$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable 30$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
28 $(call if_changed,mkregtable) 31 $(call if_changed,mkregtable)
29 32
@@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h
35 38
36$(obj)/r300.o: $(obj)/r300_reg_safe.h 39$(obj)/r300.o: $(obj)/r300_reg_safe.h
37 40
41$(obj)/r420.o: $(obj)/r420_reg_safe.h
42
38$(obj)/rs600.o: $(obj)/rs600_reg_safe.h 43$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
39 44
40radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ 45radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index 6d0183c61d3b..c714179d1bfa 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright 2006-2007 Advanced Micro Devices, Inc. 2* Copyright 2006-2007 Advanced Micro Devices, Inc.
3* 3*
4* Permission is hereby granted, free of charge, to any person obtaining a 4* Permission is hereby granted, free of charge, to any person obtaining a
5* copy of this software and associated documentation files (the "Software"), 5* copy of this software and associated documentation files (the "Software"),
@@ -41,14 +41,14 @@
41/****************************************************/ 41/****************************************************/
42/* Encoder Object ID Definition */ 42/* Encoder Object ID Definition */
43/****************************************************/ 43/****************************************************/
44#define ENCODER_OBJECT_ID_NONE 0x00 44#define ENCODER_OBJECT_ID_NONE 0x00
45 45
46/* Radeon Class Display Hardware */ 46/* Radeon Class Display Hardware */
47#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01 47#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
48#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02 48#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
49#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03 49#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
50#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04 50#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
51#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */ 51#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
52#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06 52#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
53#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07 53#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
54 54
@@ -56,11 +56,11 @@
56#define ENCODER_OBJECT_ID_SI170B 0x08 56#define ENCODER_OBJECT_ID_SI170B 0x08
57#define ENCODER_OBJECT_ID_CH7303 0x09 57#define ENCODER_OBJECT_ID_CH7303 0x09
58#define ENCODER_OBJECT_ID_CH7301 0x0A 58#define ENCODER_OBJECT_ID_CH7301 0x0A
59#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */ 59#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
60#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C 60#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
61#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D 61#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
62#define ENCODER_OBJECT_ID_TITFP513 0x0E 62#define ENCODER_OBJECT_ID_TITFP513 0x0E
63#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */ 63#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
64#define ENCODER_OBJECT_ID_VT1623 0x10 64#define ENCODER_OBJECT_ID_VT1623 0x10
65#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 65#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
66#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 66#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
@@ -68,9 +68,9 @@
68#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 68#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
69#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 69#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
70#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15 70#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
71#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */ 71#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
72#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */ 72#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
73#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */ 73#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
74#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19 74#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
75#define ENCODER_OBJECT_ID_VT1625 0x1A 75#define ENCODER_OBJECT_ID_VT1625 0x1A
76#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B 76#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
@@ -86,7 +86,7 @@
86/****************************************************/ 86/****************************************************/
87/* Connector Object ID Definition */ 87/* Connector Object ID Definition */
88/****************************************************/ 88/****************************************************/
89#define CONNECTOR_OBJECT_ID_NONE 0x00 89#define CONNECTOR_OBJECT_ID_NONE 0x00
90#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01 90#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
91#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02 91#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
92#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03 92#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
@@ -96,7 +96,7 @@
96#define CONNECTOR_OBJECT_ID_SVIDEO 0x07 96#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
97#define CONNECTOR_OBJECT_ID_YPbPr 0x08 97#define CONNECTOR_OBJECT_ID_YPbPr 0x08
98#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09 98#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
99#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */ 99#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
100#define CONNECTOR_OBJECT_ID_SCART 0x0B 100#define CONNECTOR_OBJECT_ID_SCART 0x0B
101#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C 101#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
102#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D 102#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
@@ -106,6 +106,8 @@
106#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11 106#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
107#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12 107#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
108#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 108#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
109#define CONNECTOR_OBJECT_ID_eDP 0x14
110#define CONNECTOR_OBJECT_ID_MXM 0x15
109 111
110/* deleted */ 112/* deleted */
111 113
@@ -116,6 +118,14 @@
116#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01 118#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
117 119
118/****************************************************/ 120/****************************************************/
121/* Generic Object ID Definition */
122/****************************************************/
123#define GENERIC_OBJECT_ID_NONE 0x00
124#define GENERIC_OBJECT_ID_GLSYNC 0x01
125#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
126#define GENERIC_OBJECT_ID_MXM_OPM 0x03
127
128/****************************************************/
119/* Graphics Object ENUM ID Definition */ 129/* Graphics Object ENUM ID Definition */
120/****************************************************/ 130/****************************************************/
121#define GRAPH_OBJECT_ENUM_ID1 0x01 131#define GRAPH_OBJECT_ENUM_ID1 0x01
@@ -124,6 +134,7 @@
124#define GRAPH_OBJECT_ENUM_ID4 0x04 134#define GRAPH_OBJECT_ENUM_ID4 0x04
125#define GRAPH_OBJECT_ENUM_ID5 0x05 135#define GRAPH_OBJECT_ENUM_ID5 0x05
126#define GRAPH_OBJECT_ENUM_ID6 0x06 136#define GRAPH_OBJECT_ENUM_ID6 0x06
137#define GRAPH_OBJECT_ENUM_ID7 0x07
127 138
128/****************************************************/ 139/****************************************************/
129/* Graphics Object ID Bit definition */ 140/* Graphics Object ID Bit definition */
@@ -133,35 +144,35 @@
133#define RESERVED1_ID_MASK 0x0800 144#define RESERVED1_ID_MASK 0x0800
134#define OBJECT_TYPE_MASK 0x7000 145#define OBJECT_TYPE_MASK 0x7000
135#define RESERVED2_ID_MASK 0x8000 146#define RESERVED2_ID_MASK 0x8000
136 147
137#define OBJECT_ID_SHIFT 0x00 148#define OBJECT_ID_SHIFT 0x00
138#define ENUM_ID_SHIFT 0x08 149#define ENUM_ID_SHIFT 0x08
139#define OBJECT_TYPE_SHIFT 0x0C 150#define OBJECT_TYPE_SHIFT 0x0C
140 151
152
141/****************************************************/ 153/****************************************************/
142/* Graphics Object family definition */ 154/* Graphics Object family definition */
143/****************************************************/ 155/****************************************************/
144#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \ 156#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
145 (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \ 157 GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
146 GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
147/****************************************************/ 158/****************************************************/
148/* GPU Object ID definition - Shared with BIOS */ 159/* GPU Object ID definition - Shared with BIOS */
149/****************************************************/ 160/****************************************************/
150#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\ 161#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
151 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT) 162 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
152 163
153/****************************************************/ 164/****************************************************/
154/* Encoder Object ID definition - Shared with BIOS */ 165/* Encoder Object ID definition - Shared with BIOS */
155/****************************************************/ 166/****************************************************/
156/* 167/*
157#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101 168#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
158#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102 169#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
159#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103 170#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
160#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104 171#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
161#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105 172#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
162#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106 173#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
163#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107 174#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
164#define ENCODER_SIL170B_ENUM_ID1 0x2108 175#define ENCODER_SIL170B_ENUM_ID1 0x2108
165#define ENCODER_CH7303_ENUM_ID1 0x2109 176#define ENCODER_CH7303_ENUM_ID1 0x2109
166#define ENCODER_CH7301_ENUM_ID1 0x210A 177#define ENCODER_CH7301_ENUM_ID1 0x210A
167#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B 178#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
@@ -175,8 +186,8 @@
175#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113 186#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
176#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114 187#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
177#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115 188#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
178#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116 189#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
179#define ENCODER_SI178_ENUM_ID1 0x2117 190#define ENCODER_SI178_ENUM_ID1 0x2117
180#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118 191#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
181#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119 192#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
182#define ENCODER_VT1625_ENUM_ID1 0x211A 193#define ENCODER_VT1625_ENUM_ID1 0x211A
@@ -185,205 +196,169 @@
185#define ENCODER_DP_DP501_ENUM_ID1 0x211D 196#define ENCODER_DP_DP501_ENUM_ID1 0x211D
186#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E 197#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
187*/ 198*/
188#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \ 199#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
189 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 200 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
190 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 201 ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
191 ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT) 202
192 203#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
193#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \ 204 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
194 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 205 ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
195 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 206
196 ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT) 207#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
197 208 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
198#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \ 209 ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
199 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 210
200 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 211#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
201 ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT) 212 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
202 213 ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
203#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \ 214
204 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 215#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
205 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 216 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
206 ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT) 217 ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
207 218
208#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \ 219#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
209 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 220 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
210 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 221 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
211 ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT) 222
212 223#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
213#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \ 224 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
214 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 225 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
215 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 226
216 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) 227#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
217 228 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
218#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \ 229 ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
219 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 230
220 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 231#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
221 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) 232 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
222 233 ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
223#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \ 234
224 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 235#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
225 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 236 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
226 ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT) 237 ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
227 238
228#define ENCODER_SIL170B_ENUM_ID1 \ 239#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
229 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 240 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
230 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 241 ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
231 ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT) 242
232 243#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
233#define ENCODER_CH7303_ENUM_ID1 \ 244 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
234 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 245 ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
235 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 246
236 ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT) 247#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
237 248 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
238#define ENCODER_CH7301_ENUM_ID1 \ 249 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
239 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 250
240 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 251#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
241 ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT) 252 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
242 253 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
243#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \ 254
244 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 255
245 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 256#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
246 ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT) 257 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
247 258 ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
248#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \ 259
249 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 260
250 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 261#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
251 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) 262 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
252 263 ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
253#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \ 264
254 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 265#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
255 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 266 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
256 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) 267 ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
257 268
258#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \ 269#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
259 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 270 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
260 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 271 ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
261 ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT) 272
262 273#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
263#define ENCODER_TITFP513_ENUM_ID1 \ 274 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
264 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 275 ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
265 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 276
266 ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT) 277#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
267 278 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
268#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \ 279 ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
269 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 280
270 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 281#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
271 ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT) 282 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
272 283 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
273#define ENCODER_VT1623_ENUM_ID1 \ 284
274 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 285
275 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 286#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
276 ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT) 287 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
277 288 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
278#define ENCODER_HDMI_SI1930_ENUM_ID1 \ 289
279 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 290
280 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 291#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
281 ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT) 292 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
282 293 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
283#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \ 294
284 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 295#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
285 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 296 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
286 ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT) 297 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
287 298
288#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \ 299#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
289 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 300 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
290 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 301 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
291 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) 302
292 303#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
293#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \ 304 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
294 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 305 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
295 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 306
296 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) 307#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
297 308 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
298#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \ 309 ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
299 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 310
300 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 311#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
301 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT) 312 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
302 313 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
303#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \ 314
304 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 315#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
305 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 316 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
306 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT) 317 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
307 318
308#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \ 319#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
309 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 320 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
310 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 321 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
311 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */ 322
312 323#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
313#define ENCODER_SI178_ENUM_ID1 \ 324 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
314 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 325 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
315 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 326
316 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT) 327#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
317 328 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
318#define ENCODER_MVPU_FPGA_ENUM_ID1 \ 329 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
319 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 330
320 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 331#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
321 ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT) 332 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
322 333 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
323#define ENCODER_INTERNAL_DDI_ENUM_ID1 \ 334
324 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 335#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
325 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 336 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
326 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 337 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
327 338
328#define ENCODER_VT1625_ENUM_ID1 \ 339#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
329 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 340 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
330 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 341 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
331 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT) 342
332 343#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
333#define ENCODER_HDMI_SI1932_ENUM_ID1 \ 344 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
334 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 345 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
335 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 346
336 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT) 347#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
337 348 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
338#define ENCODER_DP_DP501_ENUM_ID1 \ 349 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
339 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 350
340 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 351#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
341 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT) 352 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
342 353 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
343#define ENCODER_DP_AN9801_ENUM_ID1 \ 354
344 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 355#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
345 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 356 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
346 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT) 357 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
347 358
348#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \ 359#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
349 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 360 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
350 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 361 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
351 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
352
353#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
354 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
355 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
356 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
357
358#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
359 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
360 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
361 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
362
363#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
364 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
365 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
366 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
367
368#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
369 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
370 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
371 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
372
373#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
374 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
375 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
376 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
377
378#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
379 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
380 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
381 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
382
383#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
384 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
385 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
386 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
387 362
388/****************************************************/ 363/****************************************************/
389/* Connector Object ID definition - Shared with BIOS */ 364/* Connector Object ID definition - Shared with BIOS */
@@ -406,167 +381,253 @@
406#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F 381#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
407#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110 382#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
408*/ 383*/
409#define CONNECTOR_LVDS_ENUM_ID1 \ 384#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
410 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 385 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
411 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 386 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
412 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) 387
413 388#define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
414#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \ 389 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
415 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 390 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
416 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 391
417 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) 392#define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
418 393 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
419#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \ 394 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
420 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 395
421 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 396#define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
422 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) 397 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
423 398 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
424#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \ 399
425 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 400#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
426 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 401 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
427 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) 402 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
428 403
429#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \ 404#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
430 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 405 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
431 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 406 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
432 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) 407
433 408#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
434#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \ 409 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
435 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 410 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
436 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 411
437 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) 412#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
438 413 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
439#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \ 414 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
440 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 415
441 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 416#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
442 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) 417 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
443 418 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
444#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \ 419
445 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 420#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
446 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 421 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
447 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) 422 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
448 423
449#define CONNECTOR_VGA_ENUM_ID1 \ 424#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
450 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 425 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
451 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 426 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
452 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) 427
453 428#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
454#define CONNECTOR_VGA_ENUM_ID2 \ 429 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
455 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 430 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
456 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 431
457 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) 432#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
458 433 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
459#define CONNECTOR_COMPOSITE_ENUM_ID1 \ 434 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
460 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 435
461 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 436#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
462 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) 437 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
463 438 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
464#define CONNECTOR_SVIDEO_ENUM_ID1 \ 439
465 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 440#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
466 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 441 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
467 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) 442 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
468 443
469#define CONNECTOR_YPbPr_ENUM_ID1 \ 444#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
470 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 445 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
471 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 446 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
472 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) 447
473 448#define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
474#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \ 449 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
475 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 450 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
476 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 451
477 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) 452#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
478 453 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
479#define CONNECTOR_9PIN_DIN_ENUM_ID1 \ 454 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
480 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 455
481 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 456#define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
482 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) 457 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
483 458 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
484#define CONNECTOR_SCART_ENUM_ID1 \ 459
485 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 460#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
486 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 461 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
487 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) 462 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
488 463
489#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \ 464#define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
490 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 465 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
491 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 466 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
492 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) 467
493 468#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
494#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \ 469 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
495 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 470 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
496 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 471
497 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) 472#define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
498 473 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
499#define CONNECTOR_7PIN_DIN_ENUM_ID1 \ 474 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
500 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 475
501 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 476#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
502 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) 477 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
503 478 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
504#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \ 479
505 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 480#define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
506 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 481 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
507 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) 482 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
508 483
509#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \ 484#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
510 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 485 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
511 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 486 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
512 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) 487
513 488#define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
514#define CONNECTOR_CROSSFIRE_ENUM_ID1 \ 489 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
515 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 490 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
516 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 491
517 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) 492#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
518 493 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
519#define CONNECTOR_CROSSFIRE_ENUM_ID2 \ 494 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
520 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 495
521 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 496#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
522 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) 497 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
523 498 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
524#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \ 499
525 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 500#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
526 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 501 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
527 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) 502 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
528 503
529#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \ 504#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
530 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 505 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
531 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 506 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
532 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) 507
533 508#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
534#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \ 509 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
535 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 510 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
536 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 511
537 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 512#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
538 513 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
539#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \ 514 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
540 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 515#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
541 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 516 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
542 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 517 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
543 518
544#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \ 519#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
545 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 520 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
546 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ 521 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
547 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 522
548 523#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
549#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \ 524 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
550 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 525 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
551 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ 526
552 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 527#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
528 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
529 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
530
531#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
532 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
533 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
534
535
536#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
537 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
538 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
539
540#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
541 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
542 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
543
544#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
545 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
546 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
547
548#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
549 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
550 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
551
552#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
553 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
554 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
555
556#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
557 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
558 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
559
560#define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
561 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
562 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
563
564#define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
565 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
566 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
567
568#define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
569 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
570 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A
571
572#define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
573 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
574 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B
575
576#define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
577 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
578 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C
579
580#define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
581 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
582 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D
583
584#define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
585 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
586 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx
587
588#define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
589 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
590 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx
591
592#define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
593 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
594 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
553 595
554/****************************************************/ 596/****************************************************/
555/* Router Object ID definition - Shared with BIOS */ 597/* Router Object ID definition - Shared with BIOS */
556/****************************************************/ 598/****************************************************/
557#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \ 599#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
558 (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\ 600 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
559 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 601 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
560 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
561 602
562/* deleted */ 603/* deleted */
563 604
564/****************************************************/ 605/****************************************************/
606/* Generic Object ID definition - Shared with BIOS */
607/****************************************************/
608#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
609 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
610 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
611
612#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
613 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
614 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
615
616#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
617 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
618 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
619
620#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
621 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
622 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
623
624/****************************************************/
565/* Object Cap definition - Shared with BIOS */ 625/* Object Cap definition - Shared with BIOS */
566/****************************************************/ 626/****************************************************/
567#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L 627#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
568#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L 628#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
569 629
630
570#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01 631#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
571#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02 632#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
572#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03 633#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
@@ -575,4 +636,8 @@
575#pragma pack() 636#pragma pack()
576#endif 637#endif
577 638
578#endif /*GRAPHICTYPE */ 639#endif /*GRAPHICTYPE */
640
641
642
643
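For reference, every *_ENUM_ID* macro rewritten above packs the same three fields into one 16-bit object identifier: the graph object type, the enum instance, and the object ID, each placed with its *_SHIFT constant. The following stand-alone sketch (not part of the diff) decodes such an identifier, assuming the conventional layout of object ID in bits 0-7, enum ID in bits 8-11 and type in bits 12-14 -- the shift values are assumptions, so check the header before relying on them:

#include <stdint.h>
#include <stdio.h>

#define OBJECT_ID_SHIFT    0x00  /* assumed value */
#define ENUM_ID_SHIFT      0x08  /* assumed value */
#define OBJECT_TYPE_SHIFT  0x0C  /* assumed value */

static void decode_object_id(uint16_t id)
{
	printf("type=%u enum=%u object=0x%02x\n",
	       (id >> OBJECT_TYPE_SHIFT) & 0x7,
	       (id >> ENUM_ID_SHIFT) & 0xf,
	       (id >> OBJECT_ID_SHIFT) & 0xff);
}

int main(void)
{
	/* 0x2116 is the old literal for ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1:
	 * encoder type, enum instance 1, object 0x16 */
	decode_object_id(0x2116);
	return 0;
}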
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 388140a7e651..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <asm/unaligned.h>
27 28
28#define ATOM_DEBUG 29#define ATOM_DEBUG
29 30
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
212 case ATOM_ARG_PS: 213 case ATOM_ARG_PS:
213 idx = U8(*ptr); 214 idx = U8(*ptr);
214 (*ptr)++; 215 (*ptr)++;
215 val = le32_to_cpu(ctx->ps[idx]); 216 /* get_unaligned_le32 avoids unaligned accesses from atombios
217 * tables, noticed on a DEC Alpha. */
218 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
216 if (print) 219 if (print)
217 DEBUG("PS[0x%02X,0x%04X]", idx, val); 220 DEBUG("PS[0x%02X,0x%04X]", idx, val);
218 break; 221 break;
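The PS-argument hunk above switches the read to get_unaligned_le32() because the atombios parameter block is byte-packed, so &ctx->ps[idx] may not be 4-byte aligned and a plain 32-bit load faults on strict-alignment machines such as Alpha. A small user-space analogue of the helper (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Assemble a little-endian u32 byte by byte so the load never faults,
 * whatever the pointer's alignment. */
static uint32_t read_le32_unaligned(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	uint8_t buf[7] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0 };

	/* buf + 1 is deliberately misaligned; the byte-wise read is still safe */
	printf("0x%08x\n", read_le32_unaligned(buf + 1));
	return 0;
}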
@@ -246,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
246 case ATOM_WS_ATTRIBUTES: 249 case ATOM_WS_ATTRIBUTES:
247 val = gctx->io_attr; 250 val = gctx->io_attr;
248 break; 251 break;
252 case ATOM_WS_REGPTR:
253 val = gctx->reg_block;
254 break;
249 default: 255 default:
250 val = ctx->ws[idx]; 256 val = ctx->ws[idx];
251 } 257 }
@@ -385,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
385 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 391 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
386} 392}
387 393
394static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
395{
396 uint32_t val = 0xCDCDCDCD;
397
398 switch (align) {
399 case ATOM_SRC_DWORD:
400 val = U32(*ptr);
401 (*ptr) += 4;
402 break;
403 case ATOM_SRC_WORD0:
404 case ATOM_SRC_WORD8:
405 case ATOM_SRC_WORD16:
406 val = U16(*ptr);
407 (*ptr) += 2;
408 break;
409 case ATOM_SRC_BYTE0:
410 case ATOM_SRC_BYTE8:
411 case ATOM_SRC_BYTE16:
412 case ATOM_SRC_BYTE24:
413 val = U8(*ptr);
414 (*ptr)++;
415 break;
416 }
417 return val;
418}
419
388static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, 420static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
389 int *ptr, uint32_t *saved, int print) 421 int *ptr, uint32_t *saved, int print)
390{ 422{
@@ -482,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
482 case ATOM_WS_ATTRIBUTES: 514 case ATOM_WS_ATTRIBUTES:
483 gctx->io_attr = val; 515 gctx->io_attr = val;
484 break; 516 break;
517 case ATOM_WS_REGPTR:
518 gctx->reg_block = val;
519 break;
485 default: 520 default:
486 ctx->ws[idx] = val; 521 ctx->ws[idx] = val;
487 } 522 }
@@ -608,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
608 uint8_t count = U8((*ptr)++); 643 uint8_t count = U8((*ptr)++);
609 SDEBUG(" count: %d\n", count); 644 SDEBUG(" count: %d\n", count);
610 if (arg == ATOM_UNIT_MICROSEC) 645 if (arg == ATOM_UNIT_MICROSEC)
611 schedule_timeout_uninterruptible(usecs_to_jiffies(count)); 646 udelay(count);
612 else 647 else
613 schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 648 schedule_timeout_uninterruptible(msecs_to_jiffies(count));
614} 649}
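The delay hunk replaces the jiffies-based sleep for ATOM_UNIT_MICROSEC with udelay(): a sleep measured in jiffies rounds any microsecond request up to at least one whole tick, so the old path stretched short table delays by orders of magnitude. A user-space illustration of that rounding (the tick rate is picked for the example, not taken from the kernel config):

#include <stdio.h>

int main(void)
{
	const unsigned int hz = 250;            /* assumed tick rate */
	unsigned int us = 5;                    /* delay requested by the table */
	unsigned int jiffies = (us * hz + 999999) / 1000000;

	if (jiffies == 0)
		jiffies = 1;                    /* sleeps round up to >= 1 tick */
	printf("%u us request -> %u jiffy(s) = %u us actual\n",
	       us, jiffies, jiffies * (1000000 / hz));
	return 0;
}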
@@ -677,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
677 SDEBUG(" dst: "); 712 SDEBUG(" dst: ");
678 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 713 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
679 SDEBUG(" src1: "); 714 SDEBUG(" src1: ");
680 src1 = atom_get_src(ctx, attr, ptr); 715 src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
681 SDEBUG(" src2: "); 716 SDEBUG(" src2: ");
682 src2 = atom_get_src(ctx, attr, ptr); 717 src2 = atom_get_src(ctx, attr, ptr);
683 dst &= src1; 718 dst &= src1;
@@ -809,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
809 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); 844 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
810} 845}
811 846
847static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
848{
849 uint8_t attr = U8((*ptr)++), shift;
850 uint32_t saved, dst;
851 int dptr = *ptr;
852 attr &= 0x38;
853 attr |= atom_def_dst[attr >> 3] << 6;
854 SDEBUG(" dst: ");
855 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
856 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
857 SDEBUG(" shift: %d\n", shift);
858 dst <<= shift;
859 SDEBUG(" dst: ");
860 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
861}
862
863static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
864{
865 uint8_t attr = U8((*ptr)++), shift;
866 uint32_t saved, dst;
867 int dptr = *ptr;
868 attr &= 0x38;
869 attr |= atom_def_dst[attr >> 3] << 6;
870 SDEBUG(" dst: ");
871 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
872 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
873 SDEBUG(" shift: %d\n", shift);
874 dst >>= shift;
875 SDEBUG(" dst: ");
876 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
877}
878
812static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 879static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
813{ 880{
814 uint8_t attr = U8((*ptr)++), shift; 881 uint8_t attr = U8((*ptr)++), shift;
@@ -818,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
818 attr |= atom_def_dst[attr >> 3] << 6; 885 attr |= atom_def_dst[attr >> 3] << 6;
819 SDEBUG(" dst: "); 886 SDEBUG(" dst: ");
820 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 887 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
821 shift = U8((*ptr)++); 888 shift = atom_get_src(ctx, attr, ptr);
822 SDEBUG(" shift: %d\n", shift); 889 SDEBUG(" shift: %d\n", shift);
823 dst <<= shift; 890 dst <<= shift;
824 SDEBUG(" dst: "); 891 SDEBUG(" dst: ");
@@ -834,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
834 attr |= atom_def_dst[attr >> 3] << 6; 901 attr |= atom_def_dst[attr >> 3] << 6;
835 SDEBUG(" dst: "); 902 SDEBUG(" dst: ");
836 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 903 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837 shift = U8((*ptr)++); 904 shift = atom_get_src(ctx, attr, ptr);
838 SDEBUG(" shift: %d\n", shift); 905 SDEBUG(" shift: %d\n", shift);
839 dst >>= shift; 906 dst >>= shift;
840 SDEBUG(" dst: "); 907 SDEBUG(" dst: ");
@@ -937,18 +1004,18 @@ static struct {
937 atom_op_or, ATOM_ARG_FB}, { 1004 atom_op_or, ATOM_ARG_FB}, {
938 atom_op_or, ATOM_ARG_PLL}, { 1005 atom_op_or, ATOM_ARG_PLL}, {
939 atom_op_or, ATOM_ARG_MC}, { 1006 atom_op_or, ATOM_ARG_MC}, {
940 atom_op_shl, ATOM_ARG_REG}, { 1007 atom_op_shift_left, ATOM_ARG_REG}, {
941 atom_op_shl, ATOM_ARG_PS}, { 1008 atom_op_shift_left, ATOM_ARG_PS}, {
942 atom_op_shl, ATOM_ARG_WS}, { 1009 atom_op_shift_left, ATOM_ARG_WS}, {
943 atom_op_shl, ATOM_ARG_FB}, { 1010 atom_op_shift_left, ATOM_ARG_FB}, {
944 atom_op_shl, ATOM_ARG_PLL}, { 1011 atom_op_shift_left, ATOM_ARG_PLL}, {
945 atom_op_shl, ATOM_ARG_MC}, { 1012 atom_op_shift_left, ATOM_ARG_MC}, {
946 atom_op_shr, ATOM_ARG_REG}, { 1013 atom_op_shift_right, ATOM_ARG_REG}, {
947 atom_op_shr, ATOM_ARG_PS}, { 1014 atom_op_shift_right, ATOM_ARG_PS}, {
948 atom_op_shr, ATOM_ARG_WS}, { 1015 atom_op_shift_right, ATOM_ARG_WS}, {
949 atom_op_shr, ATOM_ARG_FB}, { 1016 atom_op_shift_right, ATOM_ARG_FB}, {
950 atom_op_shr, ATOM_ARG_PLL}, { 1017 atom_op_shift_right, ATOM_ARG_PLL}, {
951 atom_op_shr, ATOM_ARG_MC}, { 1018 atom_op_shift_right, ATOM_ARG_MC}, {
952 atom_op_mul, ATOM_ARG_REG}, { 1019 atom_op_mul, ATOM_ARG_REG}, {
953 atom_op_mul, ATOM_ARG_PS}, { 1020 atom_op_mul, ATOM_ARG_PS}, {
954 atom_op_mul, ATOM_ARG_WS}, { 1021 atom_op_mul, ATOM_ARG_WS}, {
@@ -1058,8 +1125,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1058 1125
1059 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); 1126 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1060 1127
1061 /* reset reg block */
1062 ctx->reg_block = 0;
1063 ectx.ctx = ctx; 1128 ectx.ctx = ctx;
1064 ectx.ps_shift = ps / 4; 1129 ectx.ps_shift = ps / 4;
1065 ectx.start = base; 1130 ectx.start = base;
@@ -1096,6 +1161,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1096void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1161void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1097{ 1162{
1098 mutex_lock(&ctx->mutex); 1163 mutex_lock(&ctx->mutex);
1164 /* reset reg block */
1165 ctx->reg_block = 0;
1166 /* reset fb window */
1167 ctx->fb_base = 0;
1168 /* reset io mode */
1169 ctx->io_mode = ATOM_IO_MM;
1099 atom_execute_table_locked(ctx, index, params); 1170 atom_execute_table_locked(ctx, index, params);
1100 mutex_unlock(&ctx->mutex); 1171 mutex_unlock(&ctx->mutex);
1101} 1172}
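With this hunk the interpreter context (reg_block, and now also fb_base and io_mode) is reset once per top-level atom_execute_table() call instead of inside atom_execute_table_locked(), so a table invoked through a CALL opcode can no longer wipe the register block its caller selected. A rough sketch of that nesting, with placeholder names standing in for the real driver functions:

struct interp_ctx {
	unsigned int reg_block;	/* register window picked by SET_REG_BLOCK */
};

void execute_locked(struct interp_ctx *ctx, int table)
{
	/* runs the bytecode; CALL opcodes recurse back in here, and the
	 * reg_block chosen by the caller must survive that recursion */
	(void)ctx;
	(void)table;
}

void execute(struct interp_ctx *ctx, int table)
{
	ctx->reg_block = 0;	/* cleared once, at the outermost entry only */
	execute_locked(ctx, table);
}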
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 47fd943f6d14..bc73781423a1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
91#define ATOM_WS_AND_MASK 0x45 91#define ATOM_WS_AND_MASK 0x45
92#define ATOM_WS_FB_WINDOW 0x46 92#define ATOM_WS_FB_WINDOW 0x46
93#define ATOM_WS_ATTRIBUTES 0x47 93#define ATOM_WS_ATTRIBUTES 0x47
94#define ATOM_WS_REGPTR 0x48
94 95
95#define ATOM_IIO_NOP 0 96#define ATOM_IIO_NOP 0
96#define ATOM_IIO_START 1 97#define ATOM_IIO_START 1
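ATOM_WS_REGPTR (0x48) joins the block of special workspace addresses, giving scripts a pseudo-register that reads and writes the currently selected register block (see the matching atom.c hunks above). A schematic of the special-address dispatch it extends; apart from the two addresses, the names and fields below are illustrative only:

#define WS_ATTRIBUTES 0x47
#define WS_REGPTR     0x48	/* the new special address */

struct gctx {
	unsigned int io_attr;
	unsigned int reg_block;	/* currently selected register window */
};

unsigned int ws_read(struct gctx *g, unsigned int *ws, unsigned int idx)
{
	switch (idx) {
	case WS_ATTRIBUTES:
		return g->io_attr;
	case WS_REGPTR:
		return g->reg_block;	/* expose the active register block */
	default:
		return ws[idx];		/* ordinary workspace slot */
	}
}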
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf59f00c..af464e351fbd 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308 args.ucCRTC = radeon_crtc->crtc_id; 308 args.ucCRTC = radeon_crtc->crtc_id;
309 309
310 printk("executing set crtc dtd timing\n");
311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 310 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312} 311}
313 312
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
347 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 346 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
348 args.ucCRTC = radeon_crtc->crtc_id; 347 args.ucCRTC = radeon_crtc->crtc_id;
349 348
350 printk("executing set crtc timing\n");
351 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 349 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
352} 350}
353 351
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
409 } 407 }
410} 408}
411 409
412void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 410union adjust_pixel_clock {
411 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
412};
413
414static u32 atombios_adjust_pll(struct drm_crtc *crtc,
415 struct drm_display_mode *mode,
416 struct radeon_pll *pll)
413{ 417{
414 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
415 struct drm_device *dev = crtc->dev; 418 struct drm_device *dev = crtc->dev;
416 struct radeon_device *rdev = dev->dev_private; 419 struct radeon_device *rdev = dev->dev_private;
417 struct drm_encoder *encoder = NULL; 420 struct drm_encoder *encoder = NULL;
418 struct radeon_encoder *radeon_encoder = NULL; 421 struct radeon_encoder *radeon_encoder = NULL;
419 uint8_t frev, crev; 422 u32 adjusted_clock = mode->clock;
420 int index;
421 SET_PIXEL_CLOCK_PS_ALLOCATION args;
422 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
423 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
424 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
425 uint32_t pll_clock = mode->clock;
426 uint32_t adjusted_clock;
427 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
428 struct radeon_pll *pll;
429 int pll_flags = 0;
430 423
431 memset(&args, 0, sizeof(args)); 424 /* reset the pll flags */
425 pll->flags = 0;
432 426
433 if (ASIC_IS_AVIVO(rdev)) { 427 if (ASIC_IS_AVIVO(rdev)) {
434 if ((rdev->family == CHIP_RS600) || 428 if ((rdev->family == CHIP_RS600) ||
435 (rdev->family == CHIP_RS690) || 429 (rdev->family == CHIP_RS690) ||
436 (rdev->family == CHIP_RS740)) 430 (rdev->family == CHIP_RS740))
437 pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | 431 pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
438 RADEON_PLL_PREFER_CLOSEST_LOWER); 432 RADEON_PLL_PREFER_CLOSEST_LOWER);
439 433
440 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 434 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
441 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 435 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
442 else 436 else
443 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 437 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
444 } else { 438 } else {
445 pll_flags |= RADEON_PLL_LEGACY; 439 pll->flags |= RADEON_PLL_LEGACY;
446 440
447 if (mode->clock > 200000) /* range limits??? */ 441 if (mode->clock > 200000) /* range limits??? */
448 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 442 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
449 else 443 else
450 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 444 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
451 445
452 } 446 }
453 447
454 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 448 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
455 if (encoder->crtc == crtc) { 449 if (encoder->crtc == crtc) {
456 if (!ASIC_IS_AVIVO(rdev)) {
457 if (encoder->encoder_type !=
458 DRM_MODE_ENCODER_DAC)
459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 if (encoder->encoder_type ==
461 DRM_MODE_ENCODER_LVDS)
462 pll_flags |= RADEON_PLL_USE_REF_DIV;
463 }
464 radeon_encoder = to_radeon_encoder(encoder); 450 radeon_encoder = to_radeon_encoder(encoder);
451 if (ASIC_IS_AVIVO(rdev)) {
452 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
453 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
454 adjusted_clock = mode->clock * 2;
455 } else {
456 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
457 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
458 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
459 pll->flags |= RADEON_PLL_USE_REF_DIV;
460 }
465 break; 461 break;
466 } 462 }
467 } 463 }
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
471 * special hw requirements. 467 * special hw requirements.
472 */ 468 */
473 if (ASIC_IS_DCE3(rdev)) { 469 if (ASIC_IS_DCE3(rdev)) {
474 ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; 470 union adjust_pixel_clock args;
471 struct radeon_encoder_atom_dig *dig;
472 u8 frev, crev;
473 int index;
475 474
476 if (!encoder) 475 if (!radeon_encoder->enc_priv)
477 return; 476 return adjusted_clock;
478 477 dig = radeon_encoder->enc_priv;
479 memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
480 adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
481 adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
482 adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
483 478
484 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 479 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
485 atom_execute_table(rdev->mode_info.atom_context, 480 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
486 index, (uint32_t *)&adjust_pll_args); 481 &crev);
487 adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; 482
488 } else { 483 memset(&args, 0, sizeof(args));
489 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 484
490 if (ASIC_IS_AVIVO(rdev) && 485 switch (frev) {
491 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) 486 case 1:
492 adjusted_clock = mode->clock * 2; 487 switch (crev) {
493 else 488 case 1:
494 adjusted_clock = mode->clock; 489 case 2:
490 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
491 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
492 args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
493
494 atom_execute_table(rdev->mode_info.atom_context,
495 index, (uint32_t *)&args);
496 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
497 break;
498 default:
499 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
500 return adjusted_clock;
501 }
502 break;
503 default:
504 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
505 return adjusted_clock;
506 }
495 } 507 }
508 return adjusted_clock;
509}
510
511union set_pixel_clock {
512 SET_PIXEL_CLOCK_PS_ALLOCATION base;
513 PIXEL_CLOCK_PARAMETERS v1;
514 PIXEL_CLOCK_PARAMETERS_V2 v2;
515 PIXEL_CLOCK_PARAMETERS_V3 v3;
516};
517
518void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
519{
520 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
521 struct drm_device *dev = crtc->dev;
522 struct radeon_device *rdev = dev->dev_private;
523 struct drm_encoder *encoder = NULL;
524 struct radeon_encoder *radeon_encoder = NULL;
525 u8 frev, crev;
526 int index;
527 union set_pixel_clock args;
528 u32 pll_clock = mode->clock;
529 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
530 struct radeon_pll *pll;
531 u32 adjusted_clock;
532
533 memset(&args, 0, sizeof(args));
534
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
536 if (encoder->crtc == crtc) {
537 radeon_encoder = to_radeon_encoder(encoder);
538 break;
539 }
540 }
541
542 if (!radeon_encoder)
543 return;
496 544
497 if (radeon_crtc->crtc_id == 0) 545 if (radeon_crtc->crtc_id == 0)
498 pll = &rdev->clock.p1pll; 546 pll = &rdev->clock.p1pll;
499 else 547 else
500 pll = &rdev->clock.p2pll; 548 pll = &rdev->clock.p2pll;
501 549
550 /* adjust pixel clock as needed */
551 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
552
502 if (ASIC_IS_AVIVO(rdev)) { 553 if (ASIC_IS_AVIVO(rdev)) {
503 if (radeon_new_pll) 554 if (radeon_new_pll)
504 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, 555 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
505 &fb_div, &frac_fb_div, 556 &fb_div, &frac_fb_div,
506 &ref_div, &post_div, pll_flags); 557 &ref_div, &post_div);
507 else 558 else
508 radeon_compute_pll(pll, adjusted_clock, &pll_clock, 559 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
509 &fb_div, &frac_fb_div, 560 &fb_div, &frac_fb_div,
510 &ref_div, &post_div, pll_flags); 561 &ref_div, &post_div);
511 } else 562 } else
512 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 563 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
513 &ref_div, &post_div, pll_flags); 564 &ref_div, &post_div);
514 565
515 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 566 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
516 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 567 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
520 case 1: 571 case 1:
521 switch (crev) { 572 switch (crev) {
522 case 1: 573 case 1:
523 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; 574 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
524 spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 575 args.v1.usRefDiv = cpu_to_le16(ref_div);
525 spc1_ptr->usRefDiv = cpu_to_le16(ref_div); 576 args.v1.usFbDiv = cpu_to_le16(fb_div);
526 spc1_ptr->usFbDiv = cpu_to_le16(fb_div); 577 args.v1.ucFracFbDiv = frac_fb_div;
527 spc1_ptr->ucFracFbDiv = frac_fb_div; 578 args.v1.ucPostDiv = post_div;
528 spc1_ptr->ucPostDiv = post_div; 579 args.v1.ucPpll =
529 spc1_ptr->ucPpll =
530 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 580 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
531 spc1_ptr->ucCRTC = radeon_crtc->crtc_id; 581 args.v1.ucCRTC = radeon_crtc->crtc_id;
532 spc1_ptr->ucRefDivSrc = 1; 582 args.v1.ucRefDivSrc = 1;
533 break; 583 break;
534 case 2: 584 case 2:
535 spc2_ptr = 585 args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
536 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; 586 args.v2.usRefDiv = cpu_to_le16(ref_div);
537 spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 587 args.v2.usFbDiv = cpu_to_le16(fb_div);
538 spc2_ptr->usRefDiv = cpu_to_le16(ref_div); 588 args.v2.ucFracFbDiv = frac_fb_div;
539 spc2_ptr->usFbDiv = cpu_to_le16(fb_div); 589 args.v2.ucPostDiv = post_div;
540 spc2_ptr->ucFracFbDiv = frac_fb_div; 590 args.v2.ucPpll =
541 spc2_ptr->ucPostDiv = post_div;
542 spc2_ptr->ucPpll =
543 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 591 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
544 spc2_ptr->ucCRTC = radeon_crtc->crtc_id; 592 args.v2.ucCRTC = radeon_crtc->crtc_id;
545 spc2_ptr->ucRefDivSrc = 1; 593 args.v2.ucRefDivSrc = 1;
546 break; 594 break;
547 case 3: 595 case 3:
548 if (!encoder) 596 args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
549 return; 597 args.v3.usRefDiv = cpu_to_le16(ref_div);
550 spc3_ptr = 598 args.v3.usFbDiv = cpu_to_le16(fb_div);
551 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; 599 args.v3.ucFracFbDiv = frac_fb_div;
552 spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 600 args.v3.ucPostDiv = post_div;
553 spc3_ptr->usRefDiv = cpu_to_le16(ref_div); 601 args.v3.ucPpll =
554 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
555 spc3_ptr->ucFracFbDiv = frac_fb_div;
556 spc3_ptr->ucPostDiv = post_div;
557 spc3_ptr->ucPpll =
558 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 602 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
559 spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); 603 args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
560 spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; 604 args.v3.ucTransmitterId = radeon_encoder->encoder_id;
561 spc3_ptr->ucEncoderMode = 605 args.v3.ucEncoderMode =
562 atombios_get_encoder_mode(encoder); 606 atombios_get_encoder_mode(encoder);
563 break; 607 break;
564 default: 608 default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
571 return; 615 return;
572 } 616 }
573 617
574 printk("executing set pll\n");
575 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
576} 619}
577 620
578int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, 621static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
579 struct drm_framebuffer *old_fb) 622 struct drm_framebuffer *old_fb)
580{ 623{
581 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 624 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
582 struct drm_device *dev = crtc->dev; 625 struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
706 return 0; 749 return 0;
707} 750}
708 751
752int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
753 struct drm_framebuffer *old_fb)
754{
755 struct drm_device *dev = crtc->dev;
756 struct radeon_device *rdev = dev->dev_private;
757
758 if (ASIC_IS_AVIVO(rdev))
759 return avivo_crtc_set_base(crtc, x, y, old_fb);
760 else
761 return radeon_crtc_set_base(crtc, x, y, old_fb);
762}
763
764/* properly set additional regs when using atombios */
765static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
766{
767 struct drm_device *dev = crtc->dev;
768 struct radeon_device *rdev = dev->dev_private;
769 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
770 u32 disp_merge_cntl;
771
772 switch (radeon_crtc->crtc_id) {
773 case 0:
774 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
775 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
776 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
777 break;
778 case 1:
779 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
780 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
781 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
782 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
783 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
784 break;
785 }
786}
787
709int atombios_crtc_mode_set(struct drm_crtc *crtc, 788int atombios_crtc_mode_set(struct drm_crtc *crtc,
710 struct drm_display_mode *mode, 789 struct drm_display_mode *mode,
711 struct drm_display_mode *adjusted_mode, 790 struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
727 else { 806 else {
728 if (radeon_crtc->crtc_id == 0) 807 if (radeon_crtc->crtc_id == 0)
729 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 808 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
730 radeon_crtc_set_base(crtc, x, y, old_fb); 809 atombios_crtc_set_base(crtc, x, y, old_fb);
731 radeon_legacy_atom_set_surface(crtc); 810 radeon_legacy_atom_fixup(crtc);
732 } 811 }
733 atombios_overscan_setup(crtc, mode, adjusted_mode); 812 atombios_overscan_setup(crtc, mode, adjusted_mode);
734 atombios_scaler_setup(crtc); 813 atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
746 825
747static void atombios_crtc_prepare(struct drm_crtc *crtc) 826static void atombios_crtc_prepare(struct drm_crtc *crtc)
748{ 827{
749 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
750 atombios_lock_crtc(crtc, 1); 828 atombios_lock_crtc(crtc, 1);
829 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
751} 830}
752 831
753static void atombios_crtc_commit(struct drm_crtc *crtc) 832static void atombios_crtc_commit(struct drm_crtc *crtc)
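
The SetPixelClock hunk above drops the PIXEL_CLOCK_PARAMETERS_V1/V2/V3 pointer casts into args.sPCLKInput and instead fills the versioned members of the parameter union (args.v1, args.v2, args.v3) directly, picking the member from the table revision reported by the command table header. A minimal user-space sketch of that versioned-union pattern follows; the struct layouts, field names and values are invented for illustration and are not the real ATOM structures.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-ins for the ATOM parameter structures */
    struct pll_v1 { uint16_t pixel_clock; uint8_t post_div; };
    struct pll_v2 { uint16_t pixel_clock; uint16_t ref_div; uint8_t post_div; };

    union pll_args {
        struct pll_v1 v1;
        struct pll_v2 v2;
    };

    static void fill_pll_args(union pll_args *args, int rev, uint16_t clk_10khz)
    {
        switch (rev) {
        case 1:
            args->v1.pixel_clock = clk_10khz;   /* v1 carries fewer fields */
            args->v1.post_div = 4;
            break;
        case 2:
            args->v2.pixel_clock = clk_10khz;
            args->v2.ref_div = 12;
            args->v2.post_div = 4;
            break;
        default:
            fprintf(stderr, "unknown table revision %d\n", rev);
            break;
        }
    }

    int main(void)
    {
        union pll_args args = {0};
        fill_pll_args(&args, 2, 2700);          /* 27.00 MHz expressed in 10 kHz units */
        printf("v2.ref_div=%u\n", (unsigned)args.v2.ref_div);
        return 0;
    }
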
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0d63c4436e7c..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 334 unsigned char *base;
335 int retry_count = 0;
335 336
336 memset(&args, 0, sizeof(args)); 337 memset(&args, 0, sizeof(args));
337 338
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 339 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339 340
341retry:
340 memcpy(base, req_bytes, num_bytes); 342 memcpy(base, req_bytes, num_bytes);
341 343
342 args.lpAuxRequest = 0; 344 args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
347 349
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349 351
350 if (args.ucReplyStatus) { 352 if (args.ucReplyStatus && !args.ucDataOutLen) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", 353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus); 357 chan->rec.i2c_id, args.ucReplyStatus, retry_count);
354 return false; 358 return false;
355 } 359 }
356 360
@@ -468,7 +472,8 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
468 struct radeon_connector *radeon_connector; 472 struct radeon_connector *radeon_connector;
469 struct radeon_connector_atom_dig *dig_connector; 473 struct radeon_connector_atom_dig *dig_connector;
470 474
471 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 475 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
476 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
472 return; 477 return;
473 478
474 radeon_connector = to_radeon_connector(connector); 479 radeon_connector = to_radeon_connector(connector);
@@ -582,7 +587,8 @@ void dp_link_train(struct drm_encoder *encoder,
582 u8 train_set[4]; 587 u8 train_set[4];
583 int i; 588 int i;
584 589
585 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 590 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
591 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
586 return; 592 return;
587 593
588 if (!radeon_encoder->enc_priv) 594 if (!radeon_encoder->enc_priv)
@@ -594,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder,
594 return; 600 return;
595 dig_connector = radeon_connector->con_priv; 601 dig_connector = radeon_connector->con_priv;
596 602
597 if (ASIC_IS_DCE32(rdev)) { 603 if (dig->dig_encoder)
598 if (dig->dig_block) 604 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
599 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 605 else
600 else 606 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
601 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 607 if (dig_connector->linkb)
602 if (dig_connector->linkb) 608 enc_id |= ATOM_DP_CONFIG_LINK_B;
603 enc_id |= ATOM_DP_CONFIG_LINK_B; 609 else
604 else 610 enc_id |= ATOM_DP_CONFIG_LINK_A;
605 enc_id |= ATOM_DP_CONFIG_LINK_A;
606 } else {
607 if (dig_connector->linkb)
608 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
609 else
610 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
611 }
612 611
613 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 612 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
614 if (dig_connector->dp_clock == 270000) 613 if (dig_connector->dp_clock == 270000)
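
The AUX-channel change adds a bounded retry: when atom_execute_table comes back with a non-zero ucReplyStatus and no data, a status of 0x20 is retried up to 10 times before the failure is logged with the retry count, and the DisplayPort-only guards in radeon_dp_set_link_config() and dp_link_train() now also accept eDP connectors. A self-contained sketch of that retry shape; the 0x20 status and the cap of 10 come from the hunk, everything else (function names, the simulated sink) is made up.

    #include <stdbool.h>
    #include <stdio.h>

    #define REPLY_DEFER 0x20   /* status value the patch retries on */
    #define MAX_RETRIES 10

    /* stand-in for one hardware AUX transaction; returns a reply status */
    static int do_aux_transaction(int attempt)
    {
        return attempt < 3 ? REPLY_DEFER : 0;   /* pretend the sink defers a few times */
    }

    static bool aux_transfer(void)
    {
        int retry_count = 0;
        int status;

    retry:
        status = do_aux_transaction(retry_count);
        if (status) {
            if (status == REPLY_DEFER && retry_count++ < MAX_RETRIES)
                goto retry;
            fprintf(stderr, "aux failed, status 0x%02x after %d retries\n",
                    status, retry_count);
            return false;
        }
        return true;
    }

    int main(void)
    {
        printf("transfer %s\n", aux_transfer() ? "ok" : "failed");
        return 0;
    }
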
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 0d79577c1576..607241c6a8a9 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename)
661 fseek(file, 0, SEEK_SET); 661 fseek(file, 0, SEEK_SET);
662 662
663 /* get header */ 663 /* get header */
664 if (fgets(buf, 1024, file) == NULL) 664 if (fgets(buf, 1024, file) == NULL) {
665 fclose(file);
665 return -1; 666 return -1;
667 }
666 668
667 /* first line will contain the last register 669 /* first line will contain the last register
668 * and gpu name */ 670 * and gpu name */
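
The mkregtable fix closes a plain FILE leak: the early return taken when the header line cannot be read now runs fclose() first. The same shape in isolation, with an illustrative file name:

    #include <stdio.h>

    static int read_header(const char *path, char *buf, size_t len)
    {
        FILE *file = fopen(path, "r");

        if (file == NULL)
            return -1;
        if (fgets(buf, (int)len, file) == NULL) {
            fclose(file);          /* don't leak the handle on the error path */
            return -1;
        }
        fclose(file);
        return 0;
    }

    int main(void)
    {
        char buf[1024];
        return read_header("some_register_list.txt", buf, sizeof(buf)) ? 1 : 0;
    }
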
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 71727460968f..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -131,7 +131,8 @@ void r100_hpd_init(struct radeon_device *rdev)
131 break; 131 break;
132 } 132 }
133 } 133 }
134 r100_irq_set(rdev); 134 if (rdev->irq.installed)
135 r100_irq_set(rdev);
135} 136}
136 137
137void r100_hpd_fini(struct radeon_device *rdev) 138void r100_hpd_fini(struct radeon_device *rdev)
@@ -243,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev)
243{ 244{
244 uint32_t tmp = 0; 245 uint32_t tmp = 0;
245 246
247 if (!rdev->irq.installed) {
248 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
249 WREG32(R_000040_GEN_INT_CNTL, 0);
250 return -EINVAL;
251 }
246 if (rdev->irq.sw_int) { 252 if (rdev->irq.sw_int) {
247 tmp |= RADEON_SW_INT_ENABLE; 253 tmp |= RADEON_SW_INT_ENABLE;
248 } 254 }
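
r100_hpd_init() now only calls r100_irq_set() once an IRQ handler is actually installed, and r100_irq_set() itself refuses to unmask anything otherwise, warning, writing 0 to GEN_INT_CNTL and returning -EINVAL. Reduced to its guard, with a plain variable standing in for the register write:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { bool irq_installed; unsigned int gen_int_cntl; };

    static int irq_set(struct dev *d, unsigned int mask)
    {
        if (!d->irq_installed) {
            fprintf(stderr, "can't enable IRQ/MSI: no handler installed\n");
            d->gen_int_cntl = 0;        /* leave everything masked */
            return -1;
        }
        d->gen_int_cntl = mask;
        return 0;
    }

    int main(void)
    {
        struct dev d = { .irq_installed = false };
        return irq_set(&d, 0x1) == -1 ? 0 : 1;   /* expect the guard to trip */
    }
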
@@ -348,14 +354,25 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
348 return RREG32(RADEON_CRTC2_CRNT_FRAME); 354 return RREG32(RADEON_CRTC2_CRNT_FRAME);
349} 355}
350 356
357/* Whoever calls radeon_fence_emit should call ring_lock and ask
358 * for enough space (today the callers are ib schedule and buffer move) */
351void r100_fence_ring_emit(struct radeon_device *rdev, 359void r100_fence_ring_emit(struct radeon_device *rdev,
352 struct radeon_fence *fence) 360 struct radeon_fence *fence)
353{ 361{
354 /* Who ever call radeon_fence_emit should call ring_lock and ask 362 /* We have to make sure that caches are flushed before
355 * for enough space (today caller are ib schedule and buffer move) */ 363 * CPU might read something from VRAM. */
364 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
365 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
356 /* Wait until IDLE & CLEAN */ 368 /* Wait until IDLE & CLEAN */
357 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 369 radeon_ring_write(rdev, PACKET0(0x1720, 0));
358 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 370 radeon_ring_write(rdev, (1 << 16) | (1 << 17));
371 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
372 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
373 RADEON_HDP_READ_BUFFER_INVALIDATE);
374 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
375 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
359 /* Emit fence sequence & fire IRQ */ 376 /* Emit fence sequence & fire IRQ */
360 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); 377 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
361 radeon_ring_write(rdev, fence->seq); 378 radeon_ring_write(rdev, fence->seq);
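
The fence emit path now flushes the RB3D destination and Z caches and performs the HDP read-buffer invalidate from the ring itself: HOST_PATH_CNTL is written with the invalidate bit set and then rewritten with the hdp_cntl value saved at startup, which is why each *_startup() in this series snapshots RADEON_HOST_PATH_CNTL. A toy version of the toggle-and-restore sequence, with an array standing in for the command ring and an illustrative bit position and register offset:

    #include <stdint.h>
    #include <stdio.h>

    #define HDP_READ_BUFFER_INVALIDATE (1u << 27)   /* illustrative bit position */

    static uint32_t ring[16];
    static unsigned int ring_wptr;

    static void ring_write(uint32_t v) { ring[ring_wptr++ % 16] = v; }

    /* emit "set the invalidate bit, then restore the saved value" */
    static void emit_hdp_flush(uint32_t host_path_reg, uint32_t saved_hdp_cntl)
    {
        ring_write(host_path_reg);
        ring_write(saved_hdp_cntl | HDP_READ_BUFFER_INVALIDATE);
        ring_write(host_path_reg);
        ring_write(saved_hdp_cntl);
    }

    int main(void)
    {
        emit_hdp_flush(0x0130, 0x00005008);   /* illustrative offset and value */
        printf("emitted %u dwords\n", ring_wptr);
        return 0;
    }
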
@@ -1493,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1493 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1510 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1494 return -EINVAL; 1511 return -EINVAL;
1495 } 1512 }
1513 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1496 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1514 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1497 track->immd_dwords = pkt->count - 1; 1515 track->immd_dwords = pkt->count - 1;
1498 r = r100_cs_track_check(p->rdev, track); 1516 r = r100_cs_track_check(p->rdev, track);
@@ -1713,14 +1731,6 @@ void r100_gpu_init(struct radeon_device *rdev)
1713 r100_hdp_reset(rdev); 1731 r100_hdp_reset(rdev);
1714} 1732}
1715 1733
1716void r100_hdp_flush(struct radeon_device *rdev)
1717{
1718 u32 tmp;
1719 tmp = RREG32(RADEON_HOST_PATH_CNTL);
1720 tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
1721 WREG32(RADEON_HOST_PATH_CNTL, tmp);
1722}
1723
1724void r100_hdp_reset(struct radeon_device *rdev) 1734void r100_hdp_reset(struct radeon_device *rdev)
1725{ 1735{
1726 uint32_t tmp; 1736 uint32_t tmp;
@@ -3313,6 +3323,7 @@ static int r100_startup(struct radeon_device *rdev)
3313 } 3323 }
3314 /* Enable IRQ */ 3324 /* Enable IRQ */
3315 r100_irq_set(rdev); 3325 r100_irq_set(rdev);
3326 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3316 /* 1M ring buffer */ 3327 /* 1M ring buffer */
3317 r = r100_cp_init(rdev, 1024 * 1024); 3328 r = r100_cp_init(rdev, 1024 * 1024);
3318 if (r) { 3329 if (r) {
@@ -3364,13 +3375,13 @@ int r100_suspend(struct radeon_device *rdev)
3364 3375
3365void r100_fini(struct radeon_device *rdev) 3376void r100_fini(struct radeon_device *rdev)
3366{ 3377{
3367 r100_suspend(rdev);
3368 r100_cp_fini(rdev); 3378 r100_cp_fini(rdev);
3369 r100_wb_fini(rdev); 3379 r100_wb_fini(rdev);
3370 r100_ib_fini(rdev); 3380 r100_ib_fini(rdev);
3371 radeon_gem_fini(rdev); 3381 radeon_gem_fini(rdev);
3372 if (rdev->flags & RADEON_IS_PCI) 3382 if (rdev->flags & RADEON_IS_PCI)
3373 r100_pci_gart_fini(rdev); 3383 r100_pci_gart_fini(rdev);
3384 radeon_agp_fini(rdev);
3374 radeon_irq_kms_fini(rdev); 3385 radeon_irq_kms_fini(rdev);
3375 radeon_fence_driver_fini(rdev); 3386 radeon_fence_driver_fini(rdev);
3376 radeon_bo_fini(rdev); 3387 radeon_bo_fini(rdev);
@@ -3394,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev)
3394 if (rdev->flags & RADEON_IS_AGP) { 3405 if (rdev->flags & RADEON_IS_AGP) {
3395 r = radeon_agp_init(rdev); 3406 r = radeon_agp_init(rdev);
3396 if (r) { 3407 if (r) {
3397 printk(KERN_WARNING "[drm] Disabling AGP\n"); 3408 radeon_agp_disable(rdev);
3398 rdev->flags &= ~RADEON_IS_AGP;
3399 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3400 } else { 3409 } else {
3401 rdev->mc.gtt_location = rdev->mc.agp_base; 3410 rdev->mc.gtt_location = rdev->mc.agp_base;
3402 } 3411 }
@@ -3477,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
3477 if (r) { 3486 if (r) {
3478 /* Somethings want wront with the accel init stop accel */ 3487 /* Somethings want wront with the accel init stop accel */
3479 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 3488 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3480 r100_suspend(rdev);
3481 r100_cp_fini(rdev); 3489 r100_cp_fini(rdev);
3482 r100_wb_fini(rdev); 3490 r100_wb_fini(rdev);
3483 r100_ib_fini(rdev); 3491 r100_ib_fini(rdev);
3492 radeon_irq_kms_fini(rdev);
3484 if (rdev->flags & RADEON_IS_PCI) 3493 if (rdev->flags & RADEON_IS_PCI)
3485 r100_pci_gart_fini(rdev); 3494 r100_pci_gart_fini(rdev);
3486 radeon_irq_kms_fini(rdev);
3487 rdev->accel_working = false; 3495 rdev->accel_working = false;
3488 } 3496 }
3489 return 0; 3497 return 0;
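
The r100 error and fini paths are reordered: the *_suspend() call is dropped, the CP, write-back and IB machinery are torn down before the GART, the IRQ layer is shut down earlier, radeon_agp_fini() is added, and the open-coded "warn, clear RADEON_IS_AGP, shrink the GTT" fallback becomes radeon_agp_disable(). The underlying idea is the usual unwind-in-reverse-order-of-init pattern, for example:

    #include <stdio.h>

    /* stand-ins for the individual init/fini stages */
    static int  init_a(void) { return 0; }
    static int  init_b(void) { return 0; }
    static int  init_c(void) { return -1; }   /* pretend the last stage fails */
    static void fini_b(void) { puts("fini b"); }
    static void fini_a(void) { puts("fini a"); }

    static int bring_up(void)
    {
        int r;

        r = init_a();
        if (r)
            return r;
        r = init_b();
        if (r)
            goto err_a;
        r = init_c();
        if (r)
            goto err_b;
        return 0;

    err_b:                      /* unwind strictly in reverse order */
        fini_b();
    err_a:
        fini_a();
        return r;
    }

    int main(void)
    {
        return bring_up() ? 0 : 1;
    }
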
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 20942127c46b..ff1e0cd608bf 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
371 case 5: 371 case 5:
372 case 6: 372 case 6:
373 case 7: 373 case 7:
374 /* 1D/2D */
374 track->textures[i].tex_coord_type = 0; 375 track->textures[i].tex_coord_type = 0;
375 break; 376 break;
376 case 1: 377 case 1:
377 track->textures[i].tex_coord_type = 1; 378 /* CUBE */
379 track->textures[i].tex_coord_type = 2;
378 break; 380 break;
379 case 2: 381 case 2:
380 track->textures[i].tex_coord_type = 2; 382 /* 3D */
383 track->textures[i].tex_coord_type = 1;
381 break; 384 break;
382 } 385 }
383 break; 386 break;
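
The r200 checker had the texture coordinate types crossed: a format field of 1 is a cube map and 2 is a 3D texture, so the fix records type 2 for case 1 and type 1 for case 2, with comments naming each case. As plain code, assuming the tracker's encoding of 0 = 1D/2D, 1 = 3D, 2 = cube that the hunk implies:

    #include <stdio.h>

    enum tex_coord_type { TEX_2D = 0, TEX_3D = 1, TEX_CUBE = 2 };

    static enum tex_coord_type coord_type_from_format(unsigned int fmt_field)
    {
        switch (fmt_field) {
        case 1:  return TEX_CUBE;   /* cube map */
        case 2:  return TEX_3D;     /* 3D texture */
        default: return TEX_2D;     /* 1D/2D formats */
        }
    }

    int main(void)
    {
        printf("fmt 1 -> %d, fmt 2 -> %d\n",
               coord_type_from_format(1), coord_type_from_format(2));
        return 0;
    }
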
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3f2cc9e2e8d9..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
36#include "rv350d.h" 36#include "rv350d.h"
37#include "r300_reg_safe.h" 37#include "r300_reg_safe.h"
38 38
39/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */ 39/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380
40 *
41 * GPU Errata:
42 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
43 * done through MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
44 * However, scheduling such a write on the ring seems harmless; I suspect
45 * the CP read collides with the flush somehow, or maybe the MC, hard to
46 * tell. (Jerome Glisse)
47 */
40 48
41/* 49/*
42 * rv370,rv380 PCIE GART 50 * rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
178 /* Wait until IDLE & CLEAN */ 186 /* Wait until IDLE & CLEAN */
179 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 187 radeon_ring_write(rdev, PACKET0(0x1720, 0));
180 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); 188 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
189 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
190 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
191 RADEON_HDP_READ_BUFFER_INVALIDATE);
192 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
193 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
181 /* Emit fence sequence & fire IRQ */ 194 /* Emit fence sequence & fire IRQ */
182 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); 195 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
183 radeon_ring_write(rdev, fence->seq); 196 radeon_ring_write(rdev, fence->seq);
@@ -493,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
493 506
494 /* DDR for all card after R300 & IGP */ 507 /* DDR for all card after R300 & IGP */
495 rdev->mc.vram_is_ddr = true; 508 rdev->mc.vram_is_ddr = true;
509
496 tmp = RREG32(RADEON_MEM_CNTL); 510 tmp = RREG32(RADEON_MEM_CNTL);
497 if (tmp & R300_MEM_NUM_CHANNELS_MASK) { 511 tmp &= R300_MEM_NUM_CHANNELS_MASK;
498 rdev->mc.vram_width = 128; 512 switch (tmp) {
499 } else { 513 case 0: rdev->mc.vram_width = 64; break;
500 rdev->mc.vram_width = 64; 514 case 1: rdev->mc.vram_width = 128; break;
515 case 2: rdev->mc.vram_width = 256; break;
516 default: rdev->mc.vram_width = 128; break;
501 } 517 }
502 518
503 r100_vram_init_sizes(rdev); 519 r100_vram_init_sizes(rdev);
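
r300_vram_info() stops treating R300_MEM_NUM_CHANNELS_MASK as a boolean and decodes the field: values 0, 1 and 2 select a 64-, 128- or 256-bit memory bus, with 128 as the fallback. The same decode in isolation (the low-two-bit mask in the example is only an assumption for the demo):

    #include <stdio.h>

    static int vram_width_from_mem_cntl(unsigned int mem_cntl, unsigned int channels_mask)
    {
        switch (mem_cntl & channels_mask) {
        case 0:  return 64;
        case 1:  return 128;
        case 2:  return 256;
        default: return 128;   /* conservative fallback */
        }
    }

    int main(void)
    {
        printf("%d-bit bus\n", vram_width_from_mem_cntl(0x2, 0x3));
        return 0;
    }
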
@@ -1258,6 +1274,7 @@ static int r300_startup(struct radeon_device *rdev)
1258 } 1274 }
1259 /* Enable IRQ */ 1275 /* Enable IRQ */
1260 r100_irq_set(rdev); 1276 r100_irq_set(rdev);
1277 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1261 /* 1M ring buffer */ 1278 /* 1M ring buffer */
1262 r = r100_cp_init(rdev, 1024 * 1024); 1279 r = r100_cp_init(rdev, 1024 * 1024);
1263 if (r) { 1280 if (r) {
@@ -1313,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
1313 1330
1314void r300_fini(struct radeon_device *rdev) 1331void r300_fini(struct radeon_device *rdev)
1315{ 1332{
1316 r300_suspend(rdev);
1317 r100_cp_fini(rdev); 1333 r100_cp_fini(rdev);
1318 r100_wb_fini(rdev); 1334 r100_wb_fini(rdev);
1319 r100_ib_fini(rdev); 1335 r100_ib_fini(rdev);
@@ -1322,6 +1338,7 @@ void r300_fini(struct radeon_device *rdev)
1322 rv370_pcie_gart_fini(rdev); 1338 rv370_pcie_gart_fini(rdev);
1323 if (rdev->flags & RADEON_IS_PCI) 1339 if (rdev->flags & RADEON_IS_PCI)
1324 r100_pci_gart_fini(rdev); 1340 r100_pci_gart_fini(rdev);
1341 radeon_agp_fini(rdev);
1325 radeon_irq_kms_fini(rdev); 1342 radeon_irq_kms_fini(rdev);
1326 radeon_fence_driver_fini(rdev); 1343 radeon_fence_driver_fini(rdev);
1327 radeon_bo_fini(rdev); 1344 radeon_bo_fini(rdev);
@@ -1403,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
1403 if (r) { 1420 if (r) {
1404 /* Somethings want wront with the accel init stop accel */ 1421 /* Somethings want wront with the accel init stop accel */
1405 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 1422 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1406 r300_suspend(rdev);
1407 r100_cp_fini(rdev); 1423 r100_cp_fini(rdev);
1408 r100_wb_fini(rdev); 1424 r100_wb_fini(rdev);
1409 r100_ib_fini(rdev); 1425 r100_ib_fini(rdev);
1426 radeon_irq_kms_fini(rdev);
1410 if (rdev->flags & RADEON_IS_PCIE) 1427 if (rdev->flags & RADEON_IS_PCIE)
1411 rv370_pcie_gart_fini(rdev); 1428 rv370_pcie_gart_fini(rdev);
1412 if (rdev->flags & RADEON_IS_PCI) 1429 if (rdev->flags & RADEON_IS_PCI)
1413 r100_pci_gart_fini(rdev); 1430 r100_pci_gart_fini(rdev);
1414 radeon_irq_kms_fini(rdev); 1431 radeon_agp_fini(rdev);
1415 rdev->accel_working = false; 1432 rdev->accel_working = false;
1416 } 1433 }
1417 return 0; 1434 return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c05a7270cf0c..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -30,7 +30,15 @@
30#include "radeon_reg.h" 30#include "radeon_reg.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "atom.h" 32#include "atom.h"
33#include "r100d.h"
33#include "r420d.h" 34#include "r420d.h"
35#include "r420_reg_safe.h"
36
37static void r420_set_reg_safe(struct radeon_device *rdev)
38{
39 rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
40 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
41}
34 42
35int r420_mc_init(struct radeon_device *rdev) 43int r420_mc_init(struct radeon_device *rdev)
36{ 44{
@@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
42 if (rdev->flags & RADEON_IS_AGP) { 50 if (rdev->flags & RADEON_IS_AGP) {
43 r = radeon_agp_init(rdev); 51 r = radeon_agp_init(rdev);
44 if (r) { 52 if (r) {
45 printk(KERN_WARNING "[drm] Disabling AGP\n"); 53 radeon_agp_disable(rdev);
46 rdev->flags &= ~RADEON_IS_AGP;
47 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
48 } else { 54 } else {
49 rdev->mc.gtt_location = rdev->mc.agp_base; 55 rdev->mc.gtt_location = rdev->mc.agp_base;
50 } 56 }
@@ -165,6 +171,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
165 WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); 171 WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
166} 172}
167 173
174static void r420_cp_errata_init(struct radeon_device *rdev)
175{
176 /* RV410 and R420 can lock up if CP DMA to host memory happens
177 * while the 2D engine is busy.
178 *
179 * The proper workaround is to queue a RESYNC at the beginning
180 * of the CP init, apparently.
181 */
182 radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
183 radeon_ring_lock(rdev, 8);
184 radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
185 radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
186 radeon_ring_write(rdev, 0xDEADBEEF);
187 radeon_ring_unlock_commit(rdev);
188}
189
190static void r420_cp_errata_fini(struct radeon_device *rdev)
191{
192 /* Catch the RESYNC we dispatched all the way back,
193 * at the very beginning of the CP init.
194 */
195 radeon_ring_lock(rdev, 8);
196 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
197 radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
198 radeon_ring_unlock_commit(rdev);
199 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
200}
201
168static int r420_startup(struct radeon_device *rdev) 202static int r420_startup(struct radeon_device *rdev)
169{ 203{
170 int r; 204 int r;
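
The r420 errata helpers bracket CP usage: r420_cp_errata_init() grabs a scratch register and queues a RESYNC pointing at it right after the CP comes up, and r420_cp_errata_fini(), called from suspend, emits the matching RB3D_DC_FINISH and frees the scratch register, so the pair always retires what it queued. A stub-level sketch of that pairing; every function below is a stand-in, not driver API:

    #include <stdio.h>

    static unsigned int scratch_reg;

    static void scratch_get(unsigned int *reg) { *reg = 0x15e4; /* pretend */ }
    static void scratch_free(unsigned int reg) { (void)reg; }
    static void ring_emit(const char *what)    { printf("ring: %s\n", what); }

    static void cp_errata_init(void)
    {
        scratch_get(&scratch_reg);
        ring_emit("RESYNC -> scratch");   /* queued once, right after CP init */
    }

    static void cp_errata_fini(void)
    {
        ring_emit("RB3D_DC_FINISH");      /* retire the RESYNC before the CP stops */
        scratch_free(scratch_reg);
    }

    int main(void)
    {
        cp_errata_init();
        cp_errata_fini();
        return 0;
    }
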
@@ -190,12 +224,14 @@ static int r420_startup(struct radeon_device *rdev)
190 r420_pipes_init(rdev); 224 r420_pipes_init(rdev);
191 /* Enable IRQ */ 225 /* Enable IRQ */
192 r100_irq_set(rdev); 226 r100_irq_set(rdev);
227 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
193 /* 1M ring buffer */ 228 /* 1M ring buffer */
194 r = r100_cp_init(rdev, 1024 * 1024); 229 r = r100_cp_init(rdev, 1024 * 1024);
195 if (r) { 230 if (r) {
196 dev_err(rdev->dev, "failled initializing CP (%d).\n", r); 231 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
197 return r; 232 return r;
198 } 233 }
234 r420_cp_errata_init(rdev);
199 r = r100_wb_init(rdev); 235 r = r100_wb_init(rdev);
200 if (r) { 236 if (r) {
201 dev_err(rdev->dev, "failled initializing WB (%d).\n", r); 237 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
@@ -238,6 +274,7 @@ int r420_resume(struct radeon_device *rdev)
238 274
239int r420_suspend(struct radeon_device *rdev) 275int r420_suspend(struct radeon_device *rdev)
240{ 276{
277 r420_cp_errata_fini(rdev);
241 r100_cp_disable(rdev); 278 r100_cp_disable(rdev);
242 r100_wb_disable(rdev); 279 r100_wb_disable(rdev);
243 r100_irq_disable(rdev); 280 r100_irq_disable(rdev);
@@ -346,22 +383,21 @@ int r420_init(struct radeon_device *rdev)
346 if (r) 383 if (r)
347 return r; 384 return r;
348 } 385 }
349 r300_set_reg_safe(rdev); 386 r420_set_reg_safe(rdev);
350 rdev->accel_working = true; 387 rdev->accel_working = true;
351 r = r420_startup(rdev); 388 r = r420_startup(rdev);
352 if (r) { 389 if (r) {
353 /* Somethings want wront with the accel init stop accel */ 390 /* Somethings want wront with the accel init stop accel */
354 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 391 dev_err(rdev->dev, "Disabling GPU acceleration\n");
355 r420_suspend(rdev);
356 r100_cp_fini(rdev); 392 r100_cp_fini(rdev);
357 r100_wb_fini(rdev); 393 r100_wb_fini(rdev);
358 r100_ib_fini(rdev); 394 r100_ib_fini(rdev);
395 radeon_irq_kms_fini(rdev);
359 if (rdev->flags & RADEON_IS_PCIE) 396 if (rdev->flags & RADEON_IS_PCIE)
360 rv370_pcie_gart_fini(rdev); 397 rv370_pcie_gart_fini(rdev);
361 if (rdev->flags & RADEON_IS_PCI) 398 if (rdev->flags & RADEON_IS_PCI)
362 r100_pci_gart_fini(rdev); 399 r100_pci_gart_fini(rdev);
363 radeon_agp_fini(rdev); 400 radeon_agp_fini(rdev);
364 radeon_irq_kms_fini(rdev);
365 rdev->accel_working = false; 401 rdev->accel_working = false;
366 } 402 }
367 return 0; 403 return 0;
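
r420 now installs its own generated allow-list (r420_reg_safe_bm) through the new r420_set_reg_safe() instead of reusing r300's table, so registers that only exist on r420-class parts can pass the command-stream checker. Such bitmaps are consulted with an ordinary bit test over 32-bit words; the lookup below is a generic illustration, not the checker's exact code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool reg_is_safe(const uint32_t *bm, unsigned int bm_size_words,
                            unsigned int reg_dword_index)
    {
        unsigned int word = reg_dword_index >> 5;
        unsigned int bit  = reg_dword_index & 31;

        if (word >= bm_size_words)
            return false;
        return (bm[word] >> bit) & 1;
    }

    int main(void)
    {
        uint32_t bm[2] = { 0x00000010, 0x0 };   /* only dword index 4 allowed */

        printf("reg 4 safe: %d, reg 5 safe: %d\n",
               reg_is_safe(bm, 2, 4), reg_is_safe(bm, 2, 5));
        return 0;
    }
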
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0f3843b6dac7..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
186 } 186 }
187 /* Enable IRQ */ 187 /* Enable IRQ */
188 rs600_irq_set(rdev); 188 rs600_irq_set(rdev);
189 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
189 /* 1M ring buffer */ 190 /* 1M ring buffer */
190 r = r100_cp_init(rdev, 1024 * 1024); 191 r = r100_cp_init(rdev, 1024 * 1024);
191 if (r) { 192 if (r) {
@@ -293,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
293 if (r) { 294 if (r) {
294 /* Somethings want wront with the accel init stop accel */ 295 /* Somethings want wront with the accel init stop accel */
295 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 296 dev_err(rdev->dev, "Disabling GPU acceleration\n");
296 rv515_suspend(rdev);
297 r100_cp_fini(rdev); 297 r100_cp_fini(rdev);
298 r100_wb_fini(rdev); 298 r100_wb_fini(rdev);
299 r100_ib_fini(rdev); 299 r100_ib_fini(rdev);
300 radeon_irq_kms_fini(rdev);
300 rv370_pcie_gart_fini(rdev); 301 rv370_pcie_gart_fini(rdev);
301 radeon_agp_fini(rdev); 302 radeon_agp_fini(rdev);
302 radeon_irq_kms_fini(rdev);
303 rdev->accel_working = false; 303 rdev->accel_working = false;
304 } 304 }
305 return 0; 305 return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a0ac3c134b1b..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
285 } 285 }
286 } 286 }
287 } 287 }
288 r600_irq_set(rdev); 288 if (rdev->irq.installed)
289 r600_irq_set(rdev);
289} 290}
290 291
291void r600_hpd_fini(struct radeon_device *rdev) 292void r600_hpd_fini(struct radeon_device *rdev)
@@ -623,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
623 fixed20_12 a; 624 fixed20_12 a;
624 u32 tmp; 625 u32 tmp;
625 int chansize, numchan; 626 int chansize, numchan;
626 int r;
627 627
628 /* Get VRAM informations */ 628 /* Get VRAM informations */
629 rdev->mc.vram_is_ddr = true; 629 rdev->mc.vram_is_ddr = true;
@@ -666,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
666 rdev->mc.real_vram_size = rdev->mc.aper_size; 666 rdev->mc.real_vram_size = rdev->mc.aper_size;
667 667
668 if (rdev->flags & RADEON_IS_AGP) { 668 if (rdev->flags & RADEON_IS_AGP) {
669 r = radeon_agp_init(rdev);
670 if (r)
671 return r;
672 /* gtt_size is setup by radeon_agp_init */ 669 /* gtt_size is setup by radeon_agp_init */
673 rdev->mc.gtt_location = rdev->mc.agp_base; 670 rdev->mc.gtt_location = rdev->mc.agp_base;
674 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 671 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -726,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev)
726 a.full = rfixed_const(100); 723 a.full = rfixed_const(100);
727 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); 724 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
728 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 725 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
726
727 if (rdev->flags & RADEON_IS_IGP)
728 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
729
729 return 0; 730 return 0;
730} 731}
731 732
@@ -1384,11 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1384 (void)RREG32(PCIE_PORT_DATA); 1385 (void)RREG32(PCIE_PORT_DATA);
1385} 1386}
1386 1387
1387void r600_hdp_flush(struct radeon_device *rdev)
1388{
1389 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1390}
1391
1392/* 1388/*
1393 * CP & Ring 1389 * CP & Ring
1394 */ 1390 */
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1658 rdev->cp.align_mask = 16 - 1; 1654 rdev->cp.align_mask = 16 - 1;
1659} 1655}
1660 1656
1657void r600_cp_fini(struct radeon_device *rdev)
1658{
1659 r600_cp_stop(rdev);
1660 radeon_ring_fini(rdev);
1661}
1662
1661 1663
1662/* 1664/*
1663 * GPU scratch registers helpers function. 1665 * GPU scratch registers helpers function.
@@ -1785,28 +1787,31 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1785 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1787 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1786 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1788 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1787 radeon_ring_write(rdev, fence->seq); 1789 radeon_ring_write(rdev, fence->seq);
1790 radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1791 radeon_ring_write(rdev, 1);
1788 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 1792 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1789 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); 1793 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1790 radeon_ring_write(rdev, RB_INT_STAT); 1794 radeon_ring_write(rdev, RB_INT_STAT);
1791} 1795}
1792 1796
1793int r600_copy_dma(struct radeon_device *rdev,
1794 uint64_t src_offset,
1795 uint64_t dst_offset,
1796 unsigned num_pages,
1797 struct radeon_fence *fence)
1798{
1799 /* FIXME: implement */
1800 return 0;
1801}
1802
1803int r600_copy_blit(struct radeon_device *rdev, 1797int r600_copy_blit(struct radeon_device *rdev,
1804 uint64_t src_offset, uint64_t dst_offset, 1798 uint64_t src_offset, uint64_t dst_offset,
1805 unsigned num_pages, struct radeon_fence *fence) 1799 unsigned num_pages, struct radeon_fence *fence)
1806{ 1800{
1807 r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 1801 int r;
1802
1803 mutex_lock(&rdev->r600_blit.mutex);
1804 rdev->r600_blit.vb_ib = NULL;
1805 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
1806 if (r) {
1807 if (rdev->r600_blit.vb_ib)
1808 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
1809 mutex_unlock(&rdev->r600_blit.mutex);
1810 return r;
1811 }
1808 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 1812 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
1809 r600_blit_done_copy(rdev, fence); 1813 r600_blit_done_copy(rdev, fence);
1814 mutex_unlock(&rdev->r600_blit.mutex);
1810 return 0; 1815 return 0;
1811} 1816}
1812 1817
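
r600_copy_blit() now serializes the whole prepare/copy/done sequence under the new r600_blit.mutex and propagates a failure from r600_blit_prepare_copy() instead of WARN_ON, freeing the vertex-buffer IB if one had already been obtained; the never-implemented r600_copy_dma() stub is removed. The locking and unwind shape, as a self-contained pthreads sketch with a simulated failure:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t blit_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int vb_acquired;

    static int  prepare_copy(void) { vb_acquired = 1; return -1; /* simulate failure */ }
    static void release_vb(void)   { vb_acquired = 0; }

    static int copy_blit(void)
    {
        int r;

        pthread_mutex_lock(&blit_mutex);
        r = prepare_copy();
        if (r) {
            if (vb_acquired)
                release_vb();          /* drop partially acquired resources */
            pthread_mutex_unlock(&blit_mutex);
            return r;                  /* caller can fall back to memcpy */
        }
        /* ... do the copy, emit the fence ... */
        pthread_mutex_unlock(&blit_mutex);
        return 0;
    }

    int main(void)
    {
        printf("copy_blit -> %d\n", copy_blit());
        return 0;
    }
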
@@ -1862,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
1862 return r; 1867 return r;
1863 } 1868 }
1864 r600_gpu_init(rdev); 1869 r600_gpu_init(rdev);
1865 1870 r = r600_blit_init(rdev);
1866 if (!rdev->r600_blit.shader_obj) { 1871 if (r) {
1867 r = r600_blit_init(rdev); 1872 r600_blit_fini(rdev);
1873 rdev->asic->copy = NULL;
1874 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1875 }
1876 /* pin copy shader into vram */
1877 if (rdev->r600_blit.shader_obj) {
1878 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1879 if (unlikely(r != 0))
1880 return r;
1881 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1882 &rdev->r600_blit.shader_gpu_addr);
1883 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1868 if (r) { 1884 if (r) {
1869 DRM_ERROR("radeon: failed blitter (%d).\n", r); 1885 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1870 return r; 1886 return r;
1871 } 1887 }
1872 } 1888 }
1873
1874 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1875 if (unlikely(r != 0))
1876 return r;
1877 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1878 &rdev->r600_blit.shader_gpu_addr);
1879 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1880 if (r) {
1881 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1882 return r;
1883 }
1884
1885 /* Enable IRQ */ 1889 /* Enable IRQ */
1886 r = r600_irq_init(rdev); 1890 r = r600_irq_init(rdev);
1887 if (r) { 1891 if (r) {
@@ -1946,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
1946 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1950 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1947 return r; 1951 return r;
1948 } 1952 }
1953
1954 r = r600_audio_init(rdev);
1955 if (r) {
1956 DRM_ERROR("radeon: audio resume failed\n");
1957 return r;
1958 }
1959
1949 return r; 1960 return r;
1950} 1961}
1951 1962
@@ -1953,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
1953{ 1964{
1954 int r; 1965 int r;
1955 1966
1967 r600_audio_fini(rdev);
1956 /* FIXME: we should wait for ring to be empty */ 1968 /* FIXME: we should wait for ring to be empty */
1957 r600_cp_stop(rdev); 1969 r600_cp_stop(rdev);
1958 rdev->cp.ready = false; 1970 rdev->cp.ready = false;
1971 r600_irq_suspend(rdev);
1959 r600_wb_disable(rdev); 1972 r600_wb_disable(rdev);
1960 r600_pcie_gart_disable(rdev); 1973 r600_pcie_gart_disable(rdev);
1961 /* unpin shaders bo */ 1974 /* unpin shaders bo */
1962 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1975 if (rdev->r600_blit.shader_obj) {
1963 if (unlikely(r != 0)) 1976 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1964 return r; 1977 if (!r) {
1965 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1978 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1966 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1979 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1980 }
1981 }
1967 return 0; 1982 return 0;
1968} 1983}
1969 1984
@@ -2024,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
2024 r = radeon_fence_driver_init(rdev); 2039 r = radeon_fence_driver_init(rdev);
2025 if (r) 2040 if (r)
2026 return r; 2041 return r;
2042 if (rdev->flags & RADEON_IS_AGP) {
2043 r = radeon_agp_init(rdev);
2044 if (r)
2045 radeon_agp_disable(rdev);
2046 }
2027 r = r600_mc_init(rdev); 2047 r = r600_mc_init(rdev);
2028 if (r) 2048 if (r)
2029 return r; 2049 return r;
@@ -2049,22 +2069,25 @@ int r600_init(struct radeon_device *rdev)
2049 rdev->accel_working = true; 2069 rdev->accel_working = true;
2050 r = r600_startup(rdev); 2070 r = r600_startup(rdev);
2051 if (r) { 2071 if (r) {
2052 r600_suspend(rdev); 2072 dev_err(rdev->dev, "disabling GPU acceleration\n");
2073 r600_cp_fini(rdev);
2053 r600_wb_fini(rdev); 2074 r600_wb_fini(rdev);
2054 radeon_ring_fini(rdev); 2075 r600_irq_fini(rdev);
2076 radeon_irq_kms_fini(rdev);
2055 r600_pcie_gart_fini(rdev); 2077 r600_pcie_gart_fini(rdev);
2056 rdev->accel_working = false; 2078 rdev->accel_working = false;
2057 } 2079 }
2058 if (rdev->accel_working) { 2080 if (rdev->accel_working) {
2059 r = radeon_ib_pool_init(rdev); 2081 r = radeon_ib_pool_init(rdev);
2060 if (r) { 2082 if (r) {
2061 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 2083 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2062 rdev->accel_working = false;
2063 }
2064 r = r600_ib_test(rdev);
2065 if (r) {
2066 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2067 rdev->accel_working = false; 2084 rdev->accel_working = false;
2085 } else {
2086 r = r600_ib_test(rdev);
2087 if (r) {
2088 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2089 rdev->accel_working = false;
2090 }
2068 } 2091 }
2069 } 2092 }
2070 2093
@@ -2076,21 +2099,17 @@ int r600_init(struct radeon_device *rdev)
2076 2099
2077void r600_fini(struct radeon_device *rdev) 2100void r600_fini(struct radeon_device *rdev)
2078{ 2101{
2079 /* Suspend operations */
2080 r600_suspend(rdev);
2081
2082 r600_audio_fini(rdev); 2102 r600_audio_fini(rdev);
2083 r600_blit_fini(rdev); 2103 r600_blit_fini(rdev);
2104 r600_cp_fini(rdev);
2105 r600_wb_fini(rdev);
2084 r600_irq_fini(rdev); 2106 r600_irq_fini(rdev);
2085 radeon_irq_kms_fini(rdev); 2107 radeon_irq_kms_fini(rdev);
2086 radeon_ring_fini(rdev);
2087 r600_wb_fini(rdev);
2088 r600_pcie_gart_fini(rdev); 2108 r600_pcie_gart_fini(rdev);
2109 radeon_agp_fini(rdev);
2089 radeon_gem_fini(rdev); 2110 radeon_gem_fini(rdev);
2090 radeon_fence_driver_fini(rdev); 2111 radeon_fence_driver_fini(rdev);
2091 radeon_clocks_fini(rdev); 2112 radeon_clocks_fini(rdev);
2092 if (rdev->flags & RADEON_IS_AGP)
2093 radeon_agp_fini(rdev);
2094 radeon_bo_fini(rdev); 2113 radeon_bo_fini(rdev);
2095 radeon_atombios_fini(rdev); 2114 radeon_atombios_fini(rdev);
2096 kfree(rdev->bios); 2115 kfree(rdev->bios);
@@ -2196,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2196 rb_bufsz = drm_order(ring_size / 4); 2215 rb_bufsz = drm_order(ring_size / 4);
2197 ring_size = (1 << rb_bufsz) * 4; 2216 ring_size = (1 << rb_bufsz) * 4;
2198 rdev->ih.ring_size = ring_size; 2217 rdev->ih.ring_size = ring_size;
2199 rdev->ih.align_mask = 4 - 1; 2218 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2219 rdev->ih.rptr = 0;
2200} 2220}
2201 2221
2202static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) 2222static int r600_ih_ring_alloc(struct radeon_device *rdev)
2203{ 2223{
2204 int r; 2224 int r;
2205 2225
2206 rdev->ih.ring_size = ring_size;
2207 /* Allocate ring buffer */ 2226 /* Allocate ring buffer */
2208 if (rdev->ih.ring_obj == NULL) { 2227 if (rdev->ih.ring_obj == NULL) {
2209 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2228 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2233,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2233 return r; 2252 return r;
2234 } 2253 }
2235 } 2254 }
2236 rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
2237 rdev->ih.rptr = 0;
2238
2239 return 0; 2255 return 0;
2240} 2256}
2241 2257
@@ -2385,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
2385 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 2401 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2386 2402
2387 /* allocate ring */ 2403 /* allocate ring */
2388 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); 2404 ret = r600_ih_ring_alloc(rdev);
2389 if (ret) 2405 if (ret)
2390 return ret; 2406 return ret;
2391 2407
@@ -2448,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
2448 return ret; 2464 return ret;
2449} 2465}
2450 2466
2451void r600_irq_fini(struct radeon_device *rdev) 2467void r600_irq_suspend(struct radeon_device *rdev)
2452{ 2468{
2453 r600_disable_interrupts(rdev); 2469 r600_disable_interrupts(rdev);
2454 r600_rlc_stop(rdev); 2470 r600_rlc_stop(rdev);
2471}
2472
2473void r600_irq_fini(struct radeon_device *rdev)
2474{
2475 r600_irq_suspend(rdev);
2455 r600_ih_ring_fini(rdev); 2476 r600_ih_ring_fini(rdev);
2456} 2477}
2457 2478
@@ -2461,9 +2482,17 @@ int r600_irq_set(struct radeon_device *rdev)
2461 u32 mode_int = 0; 2482 u32 mode_int = 0;
2462 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 2483 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2463 2484
2485 if (!rdev->irq.installed) {
2486 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
2487 return -EINVAL;
2488 }
2464 /* don't enable anything if the ih is disabled */ 2489 /* don't enable anything if the ih is disabled */
2465 if (!rdev->ih.enabled) 2490 if (!rdev->ih.enabled) {
2491 r600_disable_interrupts(rdev);
2492 /* force the active interrupt state to all disabled */
2493 r600_disable_interrupt_state(rdev);
2466 return 0; 2494 return 0;
2495 }
2467 2496
2468 if (ASIC_IS_DCE3(rdev)) { 2497 if (ASIC_IS_DCE3(rdev)) {
2469 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2498 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2633,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2633 wptr = RREG32(IH_RB_WPTR); 2662 wptr = RREG32(IH_RB_WPTR);
2634 2663
2635 if (wptr & RB_OVERFLOW) { 2664 if (wptr & RB_OVERFLOW) {
2636 WARN_ON(1); 2665 /* When a ring buffer overflow happens, start parsing interrupts
2637 /* XXX deal with overflow */ 2666 * from the last not-overwritten vector (wptr + 16). Hopefully
2638 DRM_ERROR("IH RB overflow\n"); 2667 * this should allow us to catch up.
2668 */
2669 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2670 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
2671 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2639 tmp = RREG32(IH_RB_CNTL); 2672 tmp = RREG32(IH_RB_CNTL);
2640 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2673 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2641 WREG32(IH_RB_CNTL, tmp); 2674 WREG32(IH_RB_CNTL, tmp);
2642 } 2675 }
2643 wptr = wptr & WPTR_OFFSET_MASK; 2676 return (wptr & rdev->ih.ptr_mask);
2644
2645 return wptr;
2646} 2677}
2647 2678
2648/* r600 IV Ring 2679/* r600 IV Ring
@@ -2678,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
2678 u32 wptr = r600_get_ih_wptr(rdev); 2709 u32 wptr = r600_get_ih_wptr(rdev);
2679 u32 rptr = rdev->ih.rptr; 2710 u32 rptr = rdev->ih.rptr;
2680 u32 src_id, src_data; 2711 u32 src_id, src_data;
2681 u32 last_entry = rdev->ih.ring_size - 16;
2682 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 2712 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2683 unsigned long flags; 2713 unsigned long flags;
2684 bool queue_hotplug = false; 2714 bool queue_hotplug = false;
2685 2715
2686 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2716 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2717 if (!rdev->ih.enabled)
2718 return IRQ_NONE;
2687 2719
2688 spin_lock_irqsave(&rdev->ih.lock, flags); 2720 spin_lock_irqsave(&rdev->ih.lock, flags);
2689 2721
@@ -2724,7 +2756,7 @@ restart_ih:
2724 } 2756 }
2725 break; 2757 break;
2726 default: 2758 default:
2727 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2759 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2728 break; 2760 break;
2729 } 2761 }
2730 break; 2762 break;
@@ -2744,7 +2776,7 @@ restart_ih:
2744 } 2776 }
2745 break; 2777 break;
2746 default: 2778 default:
2747 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2779 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2748 break; 2780 break;
2749 } 2781 }
2750 break; 2782 break;
@@ -2793,7 +2825,7 @@ restart_ih:
2793 } 2825 }
2794 break; 2826 break;
2795 default: 2827 default:
2796 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2828 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2797 break; 2829 break;
2798 } 2830 }
2799 break; 2831 break;
@@ -2807,15 +2839,13 @@ restart_ih:
2807 DRM_DEBUG("IH: CP EOP\n"); 2839 DRM_DEBUG("IH: CP EOP\n");
2808 break; 2840 break;
2809 default: 2841 default:
2810 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2842 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2811 break; 2843 break;
2812 } 2844 }
2813 2845
2814 /* wptr/rptr are in bytes! */ 2846 /* wptr/rptr are in bytes! */
2815 if (rptr == last_entry) 2847 rptr += 16;
2816 rptr = 0; 2848 rptr &= rdev->ih.ptr_mask;
2817 else
2818 rptr += 16;
2819 } 2849 }
2820 /* make sure wptr hasn't changed while processing */ 2850 /* make sure wptr hasn't changed while processing */
2821 wptr = r600_get_ih_wptr(rdev); 2851 wptr = r600_get_ih_wptr(rdev);
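
The IH ring handling is rebuilt around a power-of-two ring: ptr_mask is ring_size - 1, the read pointer advances with rptr = (rptr + 16) & ptr_mask instead of comparing against a last_entry value, and an overflow restarts parsing at (wptr + 16) & ptr_mask, just past the oldest vector that survived. The arithmetic on its own, with illustrative sizes:

    #include <stdio.h>

    #define RING_SIZE 4096u                      /* must be a power of two */
    #define PTR_MASK  (RING_SIZE - 1u)
    #define IV_SIZE   16u                        /* one interrupt vector, in bytes */

    int main(void)
    {
        unsigned int rptr = RING_SIZE - IV_SIZE;

        rptr = (rptr + IV_SIZE) & PTR_MASK;      /* wraps cleanly to 0 */
        printf("wrapped rptr = %u\n", rptr);

        unsigned int wptr = 128;                 /* where hardware stopped writing */
        unsigned int resume = (wptr + IV_SIZE) & PTR_MASK;
        printf("resume after overflow at %u\n", resume);
        return 0;
    }
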
@@ -2883,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
2883 return 0; 2913 return 0;
2884#endif 2914#endif
2885} 2915}
2916
2917/**
2918 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2919 * rdev: radeon device structure
2920 * bo: buffer object struct which userspace is waiting for idle
2921 *
2922 * Some R6XX/R7XX parts don't seem to take into account the HDP flush performed
2923 * through the ring buffer; this leads to corruption in rendering, see
2924 * http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
2925 * perform the HDP flush directly by writing the register through MMIO.
2926 */
2927void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2928{
2929 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2930}
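
r600_ioctl_wait_idle() is the MMIO side of the same HDP problem: on the wait-idle ioctl it writes HDP_MEM_COHERENCY_FLUSH_CNTL directly for parts that ignore the ring-programmed flush, and the radeon.h hunk further down wires it up as the optional asic->ioctl_wait_idle callback. A small sketch of that optional-hook pattern; the structures and names below are stand-ins:

    #include <stdio.h>

    struct dev;
    struct asic_ops {
        /* NULL on chips that don't need the extra MMIO flush */
        void (*ioctl_wait_idle)(struct dev *d);
    };

    struct dev { const struct asic_ops *ops; };

    static void r6xx_wait_idle_flush(struct dev *d)
    {
        (void)d;
        puts("MMIO HDP flush");   /* stands in for the register write */
    }

    static void wait_idle_ioctl(struct dev *d)
    {
        /* ... wait for the buffer object to go idle ... */
        if (d->ops->ioctl_wait_idle)
            d->ops->ioctl_wait_idle(d);
    }

    int main(void)
    {
        static const struct asic_ops r6xx = { .ioctl_wait_idle = r6xx_wait_idle_flush };
        struct dev d = { .ops = &r6xx };
        wait_idle_ioctl(&d);
        return 0;
    }
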
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
261 if (!r600_audio_chipset_supported(rdev)) 261 if (!r600_audio_chipset_supported(rdev))
262 return; 262 return;
263 263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer); 264 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
267} 266}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9aecafb51b66..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
449 u32 packet2s[16]; 449 u32 packet2s[16];
450 int num_packet2s = 0; 450 int num_packet2s = 0;
451 451
452 mutex_init(&rdev->r600_blit.mutex);
452 rdev->r600_blit.state_offset = 0; 453 rdev->r600_blit.state_offset = 0;
453 454
454 if (rdev->family >= CHIP_RV770) 455 if (rdev->family >= CHIP_RV770)
@@ -512,14 +513,16 @@ void r600_blit_fini(struct radeon_device *rdev)
512{ 513{
513 int r; 514 int r;
514 515
516 if (rdev->r600_blit.shader_obj == NULL)
517 return;
518 /* If we can't reserve the bo, unref should be enough to destroy
519 * it when it becomes idle.
520 */
515 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 521 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
516 if (unlikely(r != 0)) { 522 if (!r) {
517 dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); 523 radeon_bo_unpin(rdev->r600_blit.shader_obj);
518 goto out_unref; 524 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
519 } 525 }
520 radeon_bo_unpin(rdev->r600_blit.shader_obj);
521 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
522out_unref:
523 radeon_bo_unref(&rdev->r600_blit.shader_obj); 526 radeon_bo_unref(&rdev->r600_blit.shader_obj);
524} 527}
525 528
@@ -540,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
540void r600_vb_ib_put(struct radeon_device *rdev) 543void r600_vb_ib_put(struct radeon_device *rdev)
541{ 544{
542 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); 545 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
543 mutex_lock(&rdev->ib_pool.mutex);
544 list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
545 mutex_unlock(&rdev->ib_pool.mutex);
546 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 546 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
547} 547}
548 548
@@ -555,7 +555,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
555 int dwords_per_loop = 76, num_loops; 555 int dwords_per_loop = 76, num_loops;
556 556
557 r = r600_vb_ib_get(rdev); 557 r = r600_vb_ib_get(rdev);
558 WARN_ON(r); 558 if (r)
559 return r;
559 560
560 /* set_render_target emits 2 extra dwords on rv6xx */ 561 /* set_render_target emits 2 extra dwords on rv6xx */
561 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) 562 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
@@ -577,11 +578,12 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
577 ring_size = num_loops * dwords_per_loop; 578 ring_size = num_loops * dwords_per_loop;
578 /* set default + shaders */ 579 /* set default + shaders */
579 ring_size += 40; /* shaders + def state */ 580 ring_size += 40; /* shaders + def state */
580 ring_size += 5; /* fence emit for VB IB */ 581 ring_size += 7; /* fence emit for VB IB */
581 ring_size += 5; /* done copy */ 582 ring_size += 5; /* done copy */
582 ring_size += 5; /* fence emit for done copy */ 583 ring_size += 7; /* fence emit for done copy */
583 r = radeon_ring_lock(rdev, ring_size); 584 r = radeon_ring_lock(rdev, ring_size);
584 WARN_ON(r); 585 if (r)
586 return r;
585 587
586 set_default_state(rdev); /* 14 */ 588 set_default_state(rdev); /* 14 */
587 set_shaders(rdev); /* 26 */ 589 set_shaders(rdev); /* 26 */
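
r600_blit_fini() now returns early when there is no shader object and treats a failed reserve as non-fatal: if the buffer cannot be reserved it is simply unreferenced, which is enough to destroy it once it goes idle. A compact sketch of that best-effort teardown, with stub object and calls:

    #include <stdbool.h>
    #include <stdio.h>

    struct bo { bool pinned; int refcount; };

    static bool bo_reserve(struct bo *b)   { (void)b; return false; /* pretend busy */ }
    static void bo_unpin(struct bo *b)     { b->pinned = false; }
    static void bo_unreserve(struct bo *b) { (void)b; }

    static void bo_unref(struct bo **b)
    {
        if (--(*b)->refcount == 0)
            puts("freed");
        *b = NULL;
    }

    static void blit_fini(struct bo **shader_obj)
    {
        if (*shader_obj == NULL)
            return;
        if (bo_reserve(*shader_obj)) {          /* best effort: unpin only if we can */
            bo_unpin(*shader_obj);
            bo_unreserve(*shader_obj);
        }
        bo_unref(shader_obj);                   /* unref alone destroys it when idle */
    }

    int main(void)
    {
        struct bo obj = { .pinned = true, .refcount = 1 };
        struct bo *p = &obj;
        blit_fini(&p);
        return 0;
    }
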
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 44060b92d9e6..e4c45ec16507 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
38 38
39struct r600_cs_track {
40 u32 cb_color0_base_last;
41};
42
39/** 43/**
40 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
41 * @parser: parser structure holding parsing context. 45 * @parser: parser structure holding parsing context.
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
177} 181}
178 182
179/** 183/**
184 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
185 * @parser: parser structure holding parsing context.
186 *
187 * Return 1 if the next packet is a relocation packet3 (NOP), which is how
188 * userspace supplies relocations; return 0 otherwise.
189 **/
190static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
191{
192 struct radeon_cs_packet p3reloc;
193 int r;
194
195 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
196 if (r) {
197 return 0;
198 }
199 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
200 return 0;
201 }
202 return 1;
203}
204
205/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet 206 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context. 207 * @parser: parser structure holding parsing context.
182 * 208 *
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
337 struct radeon_cs_packet *pkt) 363 struct radeon_cs_packet *pkt)
338{ 364{
339 struct radeon_cs_reloc *reloc; 365 struct radeon_cs_reloc *reloc;
366 struct r600_cs_track *track;
340 volatile u32 *ib; 367 volatile u32 *ib;
341 unsigned idx; 368 unsigned idx;
342 unsigned i; 369 unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
344 int r; 371 int r;
345 u32 idx_value; 372 u32 idx_value;
346 373
374 track = (struct r600_cs_track *)p->track;
347 ib = p->ib->ptr; 375 ib = p->ib->ptr;
348 idx = pkt->idx + 1; 376 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx); 377 idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
503 for (i = 0; i < pkt->count; i++) { 531 for (i = 0; i < pkt->count; i++) {
504 reg = start_reg + (4 * i); 532 reg = start_reg + (4 * i);
505 switch (reg) { 533 switch (reg) {
534 /* These registers were added late; there is userspace
535 * which does provide relocations for them but sets a
536 * 0 offset. In order to avoid breaking old userspace
537 * we detect this and set the address to point to the last
538 * CB_COLOR0_BASE; note that if userspace doesn't set
539 * CB_COLOR0_BASE before these registers we will report an
540 * error. Old userspace always sets CB_COLOR0_BASE
541 * before any of these.
542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
565 printk_once(KERN_WARNING "radeon: You have old & broken userspace "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
506 case DB_DEPTH_BASE: 576 case DB_DEPTH_BASE:
507 case DB_HTILE_DATA_BASE: 577 case DB_HTILE_DATA_BASE:
508 case CB_COLOR0_BASE: 578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
509 case CB_COLOR1_BASE: 588 case CB_COLOR1_BASE:
510 case CB_COLOR2_BASE: 589 case CB_COLOR2_BASE:
511 case CB_COLOR3_BASE: 590 case CB_COLOR3_BASE:
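
The new FRAG/TILE cases cope with userspace that predates relocations for those registers: when the write is not followed by a relocation NOP packet, the checker substitutes the last validated CB_COLOR0_BASE (or rejects the stream if none was seen) and warns once about the old userspace; otherwise the relocation is applied as usual, and CB_COLOR0_BASE itself now records the value it validated. Reduced to its control flow:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int cb_color0_base_last;     /* last validated colour base */

    static int patch_frag_or_tile(bool has_reloc, unsigned int reloc_offset,
                                  unsigned int *slot)
    {
        if (!has_reloc) {
            if (!cb_color0_base_last) {
                fprintf(stderr, "broken old userspace: no CB_COLOR0_BASE yet\n");
                return -1;
            }
            *slot = cb_color0_base_last;         /* reuse the last known base */
            return 0;
        }
        *slot += reloc_offset;                   /* normal relocation path */
        return 0;
    }

    int main(void)
    {
        unsigned int slot = 0;

        cb_color0_base_last = 0x1000;
        return patch_frag_or_tile(false, 0, &slot) == 0 && slot == 0x1000 ? 0 : 1;
    }
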
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
678int r600_cs_parse(struct radeon_cs_parser *p) 757int r600_cs_parse(struct radeon_cs_parser *p)
679{ 758{
680 struct radeon_cs_packet pkt; 759 struct radeon_cs_packet pkt;
760 struct r600_cs_track *track;
681 int r; 761 int r;
682 762
763 track = kzalloc(sizeof(*track), GFP_KERNEL);
764 p->track = track;
683 do { 765 do {
684 r = r600_cs_packet_parse(p, &pkt, p->idx); 766 r = r600_cs_packet_parse(p, &pkt, p->idx);
685 if (r) { 767 if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
757 /* initialize parser */ 839 /* initialize parser */
758 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 840 memset(&parser, 0, sizeof(struct radeon_cs_parser));
759 parser.filp = filp; 841 parser.filp = filp;
842 parser.dev = &dev->pdev->dev;
760 parser.rdev = NULL; 843 parser.rdev = NULL;
761 parser.family = family; 844 parser.family = family;
762 parser.ib = &fake_ib; 845 parser.ib = &fake_ib;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894edadab4..30480881aed1 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
889#define C_0280E0_BASE_256B 0x00000000
890#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
891#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
892#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
893#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
894#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
895#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
896#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
897#define R_0280C0_CB_COLOR0_TILE 0x0280C0
898#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
899#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
900#define C_0280C0_BASE_256B 0x00000000
901#define R_0280C4_CB_COLOR1_TILE 0x0280C4
902#define R_0280C8_CB_COLOR2_TILE 0x0280C8
903#define R_0280CC_CB_COLOR3_TILE 0x0280CC
904#define R_0280D0_CB_COLOR4_TILE 0x0280D0
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908
909
885#endif 910#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 53b55608102b..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
96 * symbol; 96 * symbol;
97 */ 97 */
98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
99/* RADEON_IB_POOL_SIZE must be a power of 2 */
99#define RADEON_IB_POOL_SIZE 16 100#define RADEON_IB_POOL_SIZE 16
100#define RADEON_DEBUGFS_MAX_NUM_FILES 32 101#define RADEON_DEBUGFS_MAX_NUM_FILES 32
101#define RADEONFB_CONN_LIMIT 4 102#define RADEONFB_CONN_LIMIT 4
@@ -319,10 +320,12 @@ struct radeon_mc {
319 u64 real_vram_size; 320 u64 real_vram_size;
320 int vram_mtrr; 321 int vram_mtrr;
321 bool vram_is_ddr; 322 bool vram_is_ddr;
323 bool igp_sideport_enabled;
322}; 324};
323 325
324int radeon_mc_setup(struct radeon_device *rdev); 326int radeon_mc_setup(struct radeon_device *rdev);
325 327bool radeon_combios_sideport_present(struct radeon_device *rdev);
328bool radeon_atombios_sideport_present(struct radeon_device *rdev);
326 329
327/* 330/*
328 * GPU scratch registers structures, functions & helpers 331 * GPU scratch registers structures, functions & helpers
@@ -361,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
361 */ 364 */
362struct radeon_ib { 365struct radeon_ib {
363 struct list_head list; 366 struct list_head list;
364 unsigned long idx; 367 unsigned idx;
365 uint64_t gpu_addr; 368 uint64_t gpu_addr;
366 struct radeon_fence *fence; 369 struct radeon_fence *fence;
367 uint32_t *ptr; 370 uint32_t *ptr;
368 uint32_t length_dw; 371 uint32_t length_dw;
372 bool free;
369}; 373};
370 374
371/* 375/*
@@ -375,10 +379,9 @@ struct radeon_ib {
375struct radeon_ib_pool { 379struct radeon_ib_pool {
376 struct mutex mutex; 380 struct mutex mutex;
377 struct radeon_bo *robj; 381 struct radeon_bo *robj;
378 struct list_head scheduled_ibs;
379 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 382 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
380 bool ready; 383 bool ready;
381 DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); 384 unsigned head_id;
382}; 385};
383 386
384struct radeon_cp { 387struct radeon_cp {
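The pool rework above drops the allocation bitmap and the scheduled_ibs list in favour of a per-IB free flag plus a head_id cursor. A simplified sketch of that round-robin idea, written as a standalone helper rather than the driver's actual radeon_ib_get() code:

	/* scan from the last handed-out slot for a free IB;
	 * RADEON_IB_POOL_SIZE must be a power of 2 so the mask wraps cleanly
	 */
	static struct radeon_ib *ib_pool_grab(struct radeon_ib_pool *pool)
	{
		unsigned i, idx;

		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			idx = (pool->head_id + i) & (RADEON_IB_POOL_SIZE - 1);
			if (pool->ibs[idx].free) {
				pool->ibs[idx].free = false;
				pool->head_id = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
				return &pool->ibs[idx];
			}
		}
		return NULL;	/* every IB still in flight */
	}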
@@ -408,13 +411,13 @@ struct r600_ih {
408 unsigned wptr_old; 411 unsigned wptr_old;
409 unsigned ring_size; 412 unsigned ring_size;
410 uint64_t gpu_addr; 413 uint64_t gpu_addr;
411 uint32_t align_mask;
412 uint32_t ptr_mask; 414 uint32_t ptr_mask;
413 spinlock_t lock; 415 spinlock_t lock;
414 bool enabled; 416 bool enabled;
415}; 417};
416 418
417struct r600_blit { 419struct r600_blit {
420 struct mutex mutex;
418 struct radeon_bo *shader_obj; 421 struct radeon_bo *shader_obj;
419 u64 shader_gpu_addr; 422 u64 shader_gpu_addr;
420 u32 vs_offset, ps_offset; 423 u32 vs_offset, ps_offset;
@@ -463,6 +466,7 @@ struct radeon_cs_chunk {
463}; 466};
464 467
465struct radeon_cs_parser { 468struct radeon_cs_parser {
469 struct device *dev;
466 struct radeon_device *rdev; 470 struct radeon_device *rdev;
467 struct drm_file *filp; 471 struct drm_file *filp;
468 /* chunks */ 472 /* chunks */
@@ -654,11 +658,17 @@ struct radeon_asic {
654 uint32_t offset, uint32_t obj_size); 658 uint32_t offset, uint32_t obj_size);
655 int (*clear_surface_reg)(struct radeon_device *rdev, int reg); 659 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
656 void (*bandwidth_update)(struct radeon_device *rdev); 660 void (*bandwidth_update)(struct radeon_device *rdev);
657 void (*hdp_flush)(struct radeon_device *rdev);
658 void (*hpd_init)(struct radeon_device *rdev); 661 void (*hpd_init)(struct radeon_device *rdev);
659 void (*hpd_fini)(struct radeon_device *rdev); 662 void (*hpd_fini)(struct radeon_device *rdev);
660 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 663 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
661 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 664 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
665 /* ioctl hw specific callback. Some hw might want to perform special
666 * operation on specific ioctl. For instance on wait idle some hw
 667 * might want to perform an HDP flush through MMIO as it seems that
668 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
669 * through ring.
670 */
671 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
662}; 672};
663 673
664/* 674/*
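Because ioctl_wait_idle is optional (the legacy ASIC tables below leave it NULL), any caller has to guard the dispatch. A minimal sketch of such a call site, where robj stands for the radeon_bo being waited on (the surrounding ioctl code is assumed, not quoted):

	/* NULL-guarded dispatch of the new optional per-ASIC hook */
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, robj);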
@@ -667,11 +677,14 @@ struct radeon_asic {
667struct r100_asic { 677struct r100_asic {
668 const unsigned *reg_safe_bm; 678 const unsigned *reg_safe_bm;
669 unsigned reg_safe_bm_size; 679 unsigned reg_safe_bm_size;
680 u32 hdp_cntl;
670}; 681};
671 682
672struct r300_asic { 683struct r300_asic {
673 const unsigned *reg_safe_bm; 684 const unsigned *reg_safe_bm;
674 unsigned reg_safe_bm_size; 685 unsigned reg_safe_bm_size;
686 u32 resync_scratch;
687 u32 hdp_cntl;
675}; 688};
676 689
677struct r600_asic { 690struct r600_asic {
@@ -843,7 +856,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
843 856
844static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 857static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
845{ 858{
846 if (reg < 0x10000) 859 if (reg < rdev->rmmio_size)
847 return readl(((void __iomem *)rdev->rmmio) + reg); 860 return readl(((void __iomem *)rdev->rmmio) + reg);
848 else { 861 else {
849 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 862 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -853,7 +866,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
853 866
854static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 867static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
855{ 868{
856 if (reg < 0x10000) 869 if (reg < rdev->rmmio_size)
857 writel(v, ((void __iomem *)rdev->rmmio) + reg); 870 writel(v, ((void __iomem *)rdev->rmmio) + reg);
858 else { 871 else {
859 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 872 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
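For offsets at or beyond rmmio_size the accessors fall back to the MM_INDEX/MM_DATA pair; the hunk only shows the index write, so here is a hedged sketch of the full indirect read (the MM_DATA readback is an assumption based on the usual index/data register pattern):

	/* indirect read: select the register, then read it back through the data port */
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);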
@@ -1007,13 +1020,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1007#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) 1020#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
1008#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) 1021#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
1009#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) 1022#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
1010#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
1011#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) 1023#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
1012#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) 1024#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
1013#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) 1025#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
1014#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1026#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
1015 1027
1016/* Common functions */ 1028/* Common functions */
1029/* AGP */
1030extern void radeon_agp_disable(struct radeon_device *rdev);
1017extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1031extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1018extern int radeon_modeset_init(struct radeon_device *rdev); 1032extern int radeon_modeset_init(struct radeon_device *rdev);
1019extern void radeon_modeset_fini(struct radeon_device *rdev); 1033extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1137,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
1137extern void r600_cp_stop(struct radeon_device *rdev); 1151extern void r600_cp_stop(struct radeon_device *rdev);
1138extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1152extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1139extern int r600_cp_resume(struct radeon_device *rdev); 1153extern int r600_cp_resume(struct radeon_device *rdev);
1154extern void r600_cp_fini(struct radeon_device *rdev);
1140extern int r600_count_pipe_bits(uint32_t val); 1155extern int r600_count_pipe_bits(uint32_t val);
1141extern int r600_gart_clear_page(struct radeon_device *rdev, int i); 1156extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1142extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1157extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1157,7 +1172,8 @@ extern int r600_irq_init(struct radeon_device *rdev);
1157extern void r600_irq_fini(struct radeon_device *rdev); 1172extern void r600_irq_fini(struct radeon_device *rdev);
1158extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1173extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1159extern int r600_irq_set(struct radeon_device *rdev); 1174extern int r600_irq_set(struct radeon_device *rdev);
1160 1175extern void r600_irq_suspend(struct radeon_device *rdev);
1176/* r600 audio */
1161extern int r600_audio_init(struct radeon_device *rdev); 1177extern int r600_audio_init(struct radeon_device *rdev);
1162extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1178extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1163extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1179extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 54bf49a6d676..c0681a5556dc 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
144 144
145 ret = drm_agp_info(rdev->ddev, &info); 145 ret = drm_agp_info(rdev->ddev, &info);
146 if (ret) { 146 if (ret) {
147 drm_agp_release(rdev->ddev);
147 DRM_ERROR("Unable to get AGP info: %d\n", ret); 148 DRM_ERROR("Unable to get AGP info: %d\n", ret);
148 return ret; 149 return ret;
149 } 150 }
151
152 if (rdev->ddev->agp->agp_info.aper_size < 32) {
153 drm_agp_release(rdev->ddev);
154 dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
155 "need at least 32M, disabling AGP\n",
156 rdev->ddev->agp->agp_info.aper_size);
157 return -EINVAL;
158 }
159
150 mode.mode = info.mode; 160 mode.mode = info.mode;
151 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; 161 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
152 is_v3 = !!(agp_status & RADEON_AGPv3_MODE); 162 is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
221 ret = drm_agp_enable(rdev->ddev, mode); 231 ret = drm_agp_enable(rdev->ddev, mode);
222 if (ret) { 232 if (ret) {
223 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); 233 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
234 drm_agp_release(rdev->ddev);
224 return ret; 235 return ret;
225 } 236 }
226 237
@@ -252,10 +263,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
252void radeon_agp_fini(struct radeon_device *rdev) 263void radeon_agp_fini(struct radeon_device *rdev)
253{ 264{
254#if __OS_HAS_AGP 265#if __OS_HAS_AGP
255 if (rdev->flags & RADEON_IS_AGP) { 266 if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
256 if (rdev->ddev->agp && rdev->ddev->agp->acquired) { 267 drm_agp_release(rdev->ddev);
257 drm_agp_release(rdev->ddev);
258 }
259 } 268 }
260#endif 269#endif
261} 270}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index eb29217bbf1d..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -77,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
77void r100_bandwidth_update(struct radeon_device *rdev); 77void r100_bandwidth_update(struct radeon_device *rdev);
78void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 78void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
79int r100_ring_test(struct radeon_device *rdev); 79int r100_ring_test(struct radeon_device *rdev);
80void r100_hdp_flush(struct radeon_device *rdev);
81void r100_hpd_init(struct radeon_device *rdev); 80void r100_hpd_init(struct radeon_device *rdev);
82void r100_hpd_fini(struct radeon_device *rdev); 81void r100_hpd_fini(struct radeon_device *rdev);
83bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 82bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -114,11 +113,11 @@ static struct radeon_asic r100_asic = {
114 .set_surface_reg = r100_set_surface_reg, 113 .set_surface_reg = r100_set_surface_reg,
115 .clear_surface_reg = r100_clear_surface_reg, 114 .clear_surface_reg = r100_clear_surface_reg,
116 .bandwidth_update = &r100_bandwidth_update, 115 .bandwidth_update = &r100_bandwidth_update,
117 .hdp_flush = &r100_hdp_flush,
118 .hpd_init = &r100_hpd_init, 116 .hpd_init = &r100_hpd_init,
119 .hpd_fini = &r100_hpd_fini, 117 .hpd_fini = &r100_hpd_fini,
120 .hpd_sense = &r100_hpd_sense, 118 .hpd_sense = &r100_hpd_sense,
121 .hpd_set_polarity = &r100_hpd_set_polarity, 119 .hpd_set_polarity = &r100_hpd_set_polarity,
120 .ioctl_wait_idle = NULL,
122}; 121};
123 122
124 123
@@ -174,11 +173,11 @@ static struct radeon_asic r300_asic = {
174 .set_surface_reg = r100_set_surface_reg, 173 .set_surface_reg = r100_set_surface_reg,
175 .clear_surface_reg = r100_clear_surface_reg, 174 .clear_surface_reg = r100_clear_surface_reg,
176 .bandwidth_update = &r100_bandwidth_update, 175 .bandwidth_update = &r100_bandwidth_update,
177 .hdp_flush = &r100_hdp_flush,
178 .hpd_init = &r100_hpd_init, 176 .hpd_init = &r100_hpd_init,
179 .hpd_fini = &r100_hpd_fini, 177 .hpd_fini = &r100_hpd_fini,
180 .hpd_sense = &r100_hpd_sense, 178 .hpd_sense = &r100_hpd_sense,
181 .hpd_set_polarity = &r100_hpd_set_polarity, 179 .hpd_set_polarity = &r100_hpd_set_polarity,
180 .ioctl_wait_idle = NULL,
182}; 181};
183 182
184/* 183/*
@@ -218,11 +217,11 @@ static struct radeon_asic r420_asic = {
218 .set_surface_reg = r100_set_surface_reg, 217 .set_surface_reg = r100_set_surface_reg,
219 .clear_surface_reg = r100_clear_surface_reg, 218 .clear_surface_reg = r100_clear_surface_reg,
220 .bandwidth_update = &r100_bandwidth_update, 219 .bandwidth_update = &r100_bandwidth_update,
221 .hdp_flush = &r100_hdp_flush,
222 .hpd_init = &r100_hpd_init, 220 .hpd_init = &r100_hpd_init,
223 .hpd_fini = &r100_hpd_fini, 221 .hpd_fini = &r100_hpd_fini,
224 .hpd_sense = &r100_hpd_sense, 222 .hpd_sense = &r100_hpd_sense,
225 .hpd_set_polarity = &r100_hpd_set_polarity, 223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
226}; 225};
227 226
228 227
@@ -267,11 +266,11 @@ static struct radeon_asic rs400_asic = {
267 .set_surface_reg = r100_set_surface_reg, 266 .set_surface_reg = r100_set_surface_reg,
268 .clear_surface_reg = r100_clear_surface_reg, 267 .clear_surface_reg = r100_clear_surface_reg,
269 .bandwidth_update = &r100_bandwidth_update, 268 .bandwidth_update = &r100_bandwidth_update,
270 .hdp_flush = &r100_hdp_flush,
271 .hpd_init = &r100_hpd_init, 269 .hpd_init = &r100_hpd_init,
272 .hpd_fini = &r100_hpd_fini, 270 .hpd_fini = &r100_hpd_fini,
273 .hpd_sense = &r100_hpd_sense, 271 .hpd_sense = &r100_hpd_sense,
274 .hpd_set_polarity = &r100_hpd_set_polarity, 272 .hpd_set_polarity = &r100_hpd_set_polarity,
273 .ioctl_wait_idle = NULL,
275}; 274};
276 275
277 276
@@ -324,11 +323,11 @@ static struct radeon_asic rs600_asic = {
324 .set_pcie_lanes = NULL, 323 .set_pcie_lanes = NULL,
325 .set_clock_gating = &radeon_atom_set_clock_gating, 324 .set_clock_gating = &radeon_atom_set_clock_gating,
326 .bandwidth_update = &rs600_bandwidth_update, 325 .bandwidth_update = &rs600_bandwidth_update,
327 .hdp_flush = &r100_hdp_flush,
328 .hpd_init = &rs600_hpd_init, 326 .hpd_init = &rs600_hpd_init,
329 .hpd_fini = &rs600_hpd_fini, 327 .hpd_fini = &rs600_hpd_fini,
330 .hpd_sense = &rs600_hpd_sense, 328 .hpd_sense = &rs600_hpd_sense,
331 .hpd_set_polarity = &rs600_hpd_set_polarity, 329 .hpd_set_polarity = &rs600_hpd_set_polarity,
330 .ioctl_wait_idle = NULL,
332}; 331};
333 332
334 333
@@ -372,11 +371,11 @@ static struct radeon_asic rs690_asic = {
372 .set_surface_reg = r100_set_surface_reg, 371 .set_surface_reg = r100_set_surface_reg,
373 .clear_surface_reg = r100_clear_surface_reg, 372 .clear_surface_reg = r100_clear_surface_reg,
374 .bandwidth_update = &rs690_bandwidth_update, 373 .bandwidth_update = &rs690_bandwidth_update,
375 .hdp_flush = &r100_hdp_flush,
376 .hpd_init = &rs600_hpd_init, 374 .hpd_init = &rs600_hpd_init,
377 .hpd_fini = &rs600_hpd_fini, 375 .hpd_fini = &rs600_hpd_fini,
378 .hpd_sense = &rs600_hpd_sense, 376 .hpd_sense = &rs600_hpd_sense,
379 .hpd_set_polarity = &rs600_hpd_set_polarity, 377 .hpd_set_polarity = &rs600_hpd_set_polarity,
378 .ioctl_wait_idle = NULL,
380}; 379};
381 380
382 381
@@ -424,11 +423,11 @@ static struct radeon_asic rv515_asic = {
424 .set_surface_reg = r100_set_surface_reg, 423 .set_surface_reg = r100_set_surface_reg,
425 .clear_surface_reg = r100_clear_surface_reg, 424 .clear_surface_reg = r100_clear_surface_reg,
426 .bandwidth_update = &rv515_bandwidth_update, 425 .bandwidth_update = &rv515_bandwidth_update,
427 .hdp_flush = &r100_hdp_flush,
428 .hpd_init = &rs600_hpd_init, 426 .hpd_init = &rs600_hpd_init,
429 .hpd_fini = &rs600_hpd_fini, 427 .hpd_fini = &rs600_hpd_fini,
430 .hpd_sense = &rs600_hpd_sense, 428 .hpd_sense = &rs600_hpd_sense,
431 .hpd_set_polarity = &rs600_hpd_set_polarity, 429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
432}; 431};
433 432
434 433
@@ -467,11 +466,11 @@ static struct radeon_asic r520_asic = {
467 .set_surface_reg = r100_set_surface_reg, 466 .set_surface_reg = r100_set_surface_reg,
468 .clear_surface_reg = r100_clear_surface_reg, 467 .clear_surface_reg = r100_clear_surface_reg,
469 .bandwidth_update = &rv515_bandwidth_update, 468 .bandwidth_update = &rv515_bandwidth_update,
470 .hdp_flush = &r100_hdp_flush,
471 .hpd_init = &rs600_hpd_init, 469 .hpd_init = &rs600_hpd_init,
472 .hpd_fini = &rs600_hpd_fini, 470 .hpd_fini = &rs600_hpd_fini,
473 .hpd_sense = &rs600_hpd_sense, 471 .hpd_sense = &rs600_hpd_sense,
474 .hpd_set_polarity = &rs600_hpd_set_polarity, 472 .hpd_set_polarity = &rs600_hpd_set_polarity,
473 .ioctl_wait_idle = NULL,
475}; 474};
476 475
477/* 476/*
@@ -508,12 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
508int r600_copy_blit(struct radeon_device *rdev, 507int r600_copy_blit(struct radeon_device *rdev,
509 uint64_t src_offset, uint64_t dst_offset, 508 uint64_t src_offset, uint64_t dst_offset,
510 unsigned num_pages, struct radeon_fence *fence); 509 unsigned num_pages, struct radeon_fence *fence);
511void r600_hdp_flush(struct radeon_device *rdev);
512void r600_hpd_init(struct radeon_device *rdev); 510void r600_hpd_init(struct radeon_device *rdev);
513void r600_hpd_fini(struct radeon_device *rdev); 511void r600_hpd_fini(struct radeon_device *rdev);
514bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 512bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
515void r600_hpd_set_polarity(struct radeon_device *rdev, 513void r600_hpd_set_polarity(struct radeon_device *rdev,
516 enum radeon_hpd_id hpd); 514 enum radeon_hpd_id hpd);
515extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
517 516
518static struct radeon_asic r600_asic = { 517static struct radeon_asic r600_asic = {
519 .init = &r600_init, 518 .init = &r600_init,
@@ -544,11 +543,11 @@ static struct radeon_asic r600_asic = {
544 .set_surface_reg = r600_set_surface_reg, 543 .set_surface_reg = r600_set_surface_reg,
545 .clear_surface_reg = r600_clear_surface_reg, 544 .clear_surface_reg = r600_clear_surface_reg,
546 .bandwidth_update = &rv515_bandwidth_update, 545 .bandwidth_update = &rv515_bandwidth_update,
547 .hdp_flush = &r600_hdp_flush,
548 .hpd_init = &r600_hpd_init, 546 .hpd_init = &r600_hpd_init,
549 .hpd_fini = &r600_hpd_fini, 547 .hpd_fini = &r600_hpd_fini,
550 .hpd_sense = &r600_hpd_sense, 548 .hpd_sense = &r600_hpd_sense,
551 .hpd_set_polarity = &r600_hpd_set_polarity, 549 .hpd_set_polarity = &r600_hpd_set_polarity,
550 .ioctl_wait_idle = r600_ioctl_wait_idle,
552}; 551};
553 552
554/* 553/*
@@ -589,11 +588,11 @@ static struct radeon_asic rv770_asic = {
589 .set_surface_reg = r600_set_surface_reg, 588 .set_surface_reg = r600_set_surface_reg,
590 .clear_surface_reg = r600_clear_surface_reg, 589 .clear_surface_reg = r600_clear_surface_reg,
591 .bandwidth_update = &rv515_bandwidth_update, 590 .bandwidth_update = &rv515_bandwidth_update,
592 .hdp_flush = &r600_hdp_flush,
593 .hpd_init = &r600_hpd_init, 591 .hpd_init = &r600_hpd_init,
594 .hpd_fini = &r600_hpd_fini, 592 .hpd_fini = &r600_hpd_fini,
595 .hpd_sense = &r600_hpd_sense, 593 .hpd_sense = &r600_hpd_sense,
596 .hpd_set_polarity = &r600_hpd_set_polarity, 594 .hpd_set_polarity = &r600_hpd_set_polarity,
595 .ioctl_wait_idle = r600_ioctl_wait_idle,
597}; 596};
598 597
599#endif 598#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 321044bef71c..2dcda6115874 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
114 i2c.i2c_id = gpio->sucI2cId.ucAccess; 114 i2c.i2c_id = gpio->sucI2cId.ucAccess;
115 115
116 i2c.valid = true; 116 i2c.valid = true;
117 break;
117 } 118 }
118 } 119 }
119 120
@@ -286,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
286 *connector_type = DRM_MODE_CONNECTOR_DVID; 287 *connector_type = DRM_MODE_CONNECTOR_DVID;
287 } 288 }
288 289
290 /* XFX Pine Group device rv730 reports no VGA DDC lines
291 * even though they are wired up to record 0x93
292 */
293 if ((dev->pdev->device == 0x9498) &&
294 (dev->pdev->subsystem_vendor == 0x1682) &&
295 (dev->pdev->subsystem_device == 0x2452)) {
296 struct radeon_device *rdev = dev->dev_private;
297 *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
298 }
289 return true; 299 return true;
290} 300}
291 301
@@ -345,7 +355,9 @@ const int object_connector_convert[] = {
345 DRM_MODE_CONNECTOR_Unknown, 355 DRM_MODE_CONNECTOR_Unknown,
346 DRM_MODE_CONNECTOR_Unknown, 356 DRM_MODE_CONNECTOR_Unknown,
347 DRM_MODE_CONNECTOR_Unknown, 357 DRM_MODE_CONNECTOR_Unknown,
348 DRM_MODE_CONNECTOR_DisplayPort 358 DRM_MODE_CONNECTOR_DisplayPort,
359 DRM_MODE_CONNECTOR_eDP,
360 DRM_MODE_CONNECTOR_Unknown
349}; 361};
350 362
351bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) 363bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
@@ -935,6 +947,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
935 return false; 947 return false;
936} 948}
937 949
950union igp_info {
951 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
952 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
953};
954
955bool radeon_atombios_sideport_present(struct radeon_device *rdev)
956{
957 struct radeon_mode_info *mode_info = &rdev->mode_info;
958 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
959 union igp_info *igp_info;
960 u8 frev, crev;
961 u16 data_offset;
962
963 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
964 &crev, &data_offset);
965
966 igp_info = (union igp_info *)(mode_info->atom_context->bios +
967 data_offset);
968
969 if (igp_info) {
970 switch (crev) {
971 case 1:
972 if (igp_info->info.ucMemoryType & 0xf0)
973 return true;
974 break;
975 case 2:
976 if (igp_info->info_2.ucMemoryType & 0x0f)
977 return true;
978 break;
979 default:
980 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
981 break;
982 }
983 }
984 return false;
985}
986
938bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, 987bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
939 struct radeon_encoder_int_tmds *tmds) 988 struct radeon_encoder_int_tmds *tmds)
940{ 989{
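A natural consumer of the two sideport helpers is the IGP memory-controller setup, which can cache the answer in the new mc.igp_sideport_enabled field; a hedged sketch of that call site (the is_atom_bios field name and the exact location are assumptions):

	/* record sideport presence once, during MC init on IGP parts */
	if (rdev->is_atom_bios)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	else
		rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);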
@@ -1026,6 +1075,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1026 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1075 ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
1027 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1076 ss->range = ss_info->asSS_Info[i].ucSS_Range;
1028 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1077 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
1078 break;
1029 } 1079 }
1030 } 1080 }
1031 } 1081 }
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
65 if (r) { 65 if (r) {
66 goto out_cleanup; 66 goto out_cleanup;
67 } 67 }
68 start_jiffies = jiffies; 68
 69 for (i = 0; i < n; i++) { 69 /* r100 doesn't have a dma engine so skip the test */
70 r = radeon_fence_create(rdev, &fence); 70 if (rdev->asic->copy_dma) {
71 if (r) { 71
72 goto out_cleanup; 72 start_jiffies = jiffies;
73 for (i = 0; i < n; i++) {
74 r = radeon_fence_create(rdev, &fence);
75 if (r) {
76 goto out_cleanup;
77 }
78
79 r = radeon_copy_dma(rdev, saddr, daddr,
80 size / RADEON_GPU_PAGE_SIZE, fence);
81
82 if (r) {
83 goto out_cleanup;
84 }
85 r = radeon_fence_wait(fence, false);
86 if (r) {
87 goto out_cleanup;
88 }
89 radeon_fence_unref(&fence);
73 } 90 }
74 r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); 91 end_jiffies = jiffies;
75 if (r) { 92 time = end_jiffies - start_jiffies;
76 goto out_cleanup; 93 time = jiffies_to_msecs(time);
94 if (time > 0) {
95 i = ((n * size) >> 10) / time;
96 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
97 " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
98 n, size >> 10,
99 sdomain, ddomain, time,
100 i, i * 1000, (i * 1000) / 1024);
77 } 101 }
78 r = radeon_fence_wait(fence, false);
79 if (r) {
80 goto out_cleanup;
81 }
82 radeon_fence_unref(&fence);
83 }
84 end_jiffies = jiffies;
85 time = end_jiffies - start_jiffies;
86 time = jiffies_to_msecs(time);
87 if (time > 0) {
88 i = ((n * size) >> 10) / time;
89 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
90 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
91 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
92 } 102 }
103
93 start_jiffies = jiffies; 104 start_jiffies = jiffies;
94 for (i = 0; i < n; i++) { 105 for (i = 0; i < n; i++) {
95 r = radeon_fence_create(rdev, &fence); 106 r = radeon_fence_create(rdev, &fence);
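The throughput line printed above is plain integer arithmetic on the move count, the buffer size and the elapsed milliseconds. A small standalone sketch of the same calculation with hypothetical values, just to make the units explicit:

	#include <stdio.h>

	int main(void)
	{
		unsigned n = 1024;		/* number of BO moves (hypothetical) */
		unsigned size = 1024 * 1024;	/* bytes per move (hypothetical) */
		unsigned long time_ms = 50;	/* elapsed time in ms (hypothetical) */
		unsigned kb_per_ms;

		if (time_ms > 0) {
			kb_per_ms = ((n * size) >> 10) / time_ms;
			printf("%u moves of %ukb in %lums (%ukb/ms %ukb/s %uM/s)\n",
			       n, size >> 10, time_ms,
			       kb_per_ms, kb_per_ms * 1000, (kb_per_ms * 1000) / 1024);
		}
		return 0;
	}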
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 812f24dbc2a8..73c4405bf42f 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
56 else if (post_div == 3) 56 else if (post_div == 3)
57 sclk >>= 2; 57 sclk >>= 2;
58 else if (post_div == 4) 58 else if (post_div == 4)
59 sclk >>= 4; 59 sclk >>= 3;
60 60
61 return sclk; 61 return sclk;
62} 62}
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
86 else if (post_div == 3) 86 else if (post_div == 3)
87 mclk >>= 2; 87 mclk >>= 2;
88 else if (post_div == 4) 88 else if (post_div == 4)
89 mclk >>= 4; 89 mclk >>= 3;
90 90
91 return mclk; 91 return mclk;
92} 92}
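Both one-liners fix the same bug: a post divider code of 4 means divide by 8, so the clock should be shifted right by 3, not by 4. A tiny standalone check of the corrected mapping for the two codes visible in the hunk (the lower codes are omitted here):

	#include <stdio.h>

	/* per the fixed code: post_div 3 -> clk/4, post_div 4 -> clk/8 */
	static unsigned apply_post_div(unsigned clk, int post_div)
	{
		if (post_div == 3)
			return clk >> 2;
		if (post_div == 4)
			return clk >> 3;
		return clk;
	}

	int main(void)
	{
		printf("%u\n", apply_post_div(400000, 4));	/* prints 50000, i.e. 400000/8 */
		return 0;
	}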
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index fd94dbca33ac..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -595,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
595 return false; 595 return false;
596} 596}
597 597
598bool radeon_combios_sideport_present(struct radeon_device *rdev)
599{
600 struct drm_device *dev = rdev->ddev;
601 u16 igp_info;
602
603 igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
604
605 if (igp_info) {
606 if (RBIOS16(igp_info + 0x4))
607 return true;
608 }
609 return false;
610}
611
612static const uint32_t default_primarydac_adj[CHIP_LAST] = {
613 0x00000808, /* r100 */
614 0x00000808, /* rv100 */
615 0x00000808, /* rs100 */
616 0x00000808, /* rv200 */
617 0x00000808, /* rs200 */
618 0x00000808, /* r200 */
619 0x00000808, /* rv250 */
620 0x00000000, /* rs300 */
621 0x00000808, /* rv280 */
622 0x00000808, /* r300 */
623 0x00000808, /* r350 */
624 0x00000808, /* rv350 */
625 0x00000808, /* rv380 */
626 0x00000808, /* r420 */
627 0x00000808, /* r423 */
628 0x00000808, /* rv410 */
629 0x00000000, /* rs400 */
630 0x00000000, /* rs480 */
631};
632
633static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
634 struct radeon_encoder_primary_dac *p_dac)
635{
636 p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
637 return;
638}
639
598struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct 640struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
599 radeon_encoder 641 radeon_encoder
600 *encoder) 642 *encoder)
@@ -604,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
604 uint16_t dac_info; 646 uint16_t dac_info;
605 uint8_t rev, bg, dac; 647 uint8_t rev, bg, dac;
606 struct radeon_encoder_primary_dac *p_dac = NULL; 648 struct radeon_encoder_primary_dac *p_dac = NULL;
649 int found = 0;
607 650
608 if (rdev->bios == NULL) 651 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
652 GFP_KERNEL);
653
654 if (!p_dac)
609 return NULL; 655 return NULL;
610 656
657 if (rdev->bios == NULL)
658 goto out;
659
611 /* check CRT table */ 660 /* check CRT table */
612 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 661 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
613 if (dac_info) { 662 if (dac_info) {
614 p_dac =
615 kzalloc(sizeof(struct radeon_encoder_primary_dac),
616 GFP_KERNEL);
617
618 if (!p_dac)
619 return NULL;
620
621 rev = RBIOS8(dac_info) & 0x3; 663 rev = RBIOS8(dac_info) & 0x3;
622 if (rev < 2) { 664 if (rev < 2) {
623 bg = RBIOS8(dac_info + 0x2) & 0xf; 665 bg = RBIOS8(dac_info + 0x2) & 0xf;
@@ -628,9 +670,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
628 dac = RBIOS8(dac_info + 0x3) & 0xf; 670 dac = RBIOS8(dac_info + 0x3) & 0xf;
629 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 671 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
630 } 672 }
631 673 found = 1;
632 } 674 }
633 675
676out:
677 if (!found) /* fallback to defaults */
678 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
679
634 return p_dac; 680 return p_dac;
635} 681}
636 682
@@ -641,6 +687,9 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
641 uint16_t tv_info; 687 uint16_t tv_info;
642 enum radeon_tv_std tv_std = TV_STD_NTSC; 688 enum radeon_tv_std tv_std = TV_STD_NTSC;
643 689
690 if (rdev->bios == NULL)
691 return tv_std;
692
644 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 693 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
645 if (tv_info) { 694 if (tv_info) {
646 if (RBIOS8(tv_info + 6) == 'T') { 695 if (RBIOS8(tv_info + 6) == 'T') {
@@ -922,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
922 lvds->native_mode.vdisplay); 971 lvds->native_mode.vdisplay);
923 972
924 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
925 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 974 lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
926 lvds->panel_vcc_delay = 2000;
927 975
928 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); 976 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
929 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; 977 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
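With the rework above, radeon_combios_get_primary_dac_info() allocates the structure up front and falls back to the per-family default table when the BIOS is absent or has no usable CRT record, so a NULL return now only means allocation failure. A hedged sketch of the resulting caller pattern (the enc_priv field name is an assumption):

	struct radeon_encoder_primary_dac *p_dac;

	p_dac = radeon_combios_get_primary_dac_info(radeon_encoder);
	if (!p_dac)
		return;		/* only possible on kzalloc() failure now */
	radeon_encoder->enc_priv = p_dac;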
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 20161567dbff..238188540017 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector)
49 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 49 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
50 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 50 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
51 51
52 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 52 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
53 if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 53 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
54 if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
55 (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
54 if (radeon_dp_needs_link_train(radeon_connector)) { 56 if (radeon_dp_needs_link_train(radeon_connector)) {
55 if (connector->encoder) 57 if (connector->encoder)
56 dp_link_train(connector->encoder, connector); 58 dp_link_train(connector->encoder, connector);
@@ -578,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
578 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 580 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
579 struct drm_encoder *encoder; 581 struct drm_encoder *encoder;
580 struct drm_encoder_helper_funcs *encoder_funcs; 582 struct drm_encoder_helper_funcs *encoder_funcs;
581 bool dret; 583 bool dret = false;
582 enum drm_connector_status ret = connector_status_disconnected; 584 enum drm_connector_status ret = connector_status_disconnected;
583 585
584 encoder = radeon_best_single_encoder(connector); 586 encoder = radeon_best_single_encoder(connector);
585 if (!encoder) 587 if (!encoder)
586 ret = connector_status_disconnected; 588 ret = connector_status_disconnected;
587 589
588 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 590 if (radeon_connector->ddc_bus) {
589 dret = radeon_ddc_probe(radeon_connector); 591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
590 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 592 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
591 if (dret) { 595 if (dret) {
592 if (radeon_connector->edid) { 596 if (radeon_connector->edid) {
593 kfree(radeon_connector->edid); 597 kfree(radeon_connector->edid);
@@ -615,7 +619,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
615 ret = connector_status_connected; 619 ret = connector_status_connected;
616 } 620 }
617 } else { 621 } else {
618 if (radeon_connector->dac_load_detect) { 622 if (radeon_connector->dac_load_detect && encoder) {
619 encoder_funcs = encoder->helper_private; 623 encoder_funcs = encoder->helper_private;
620 ret = encoder_funcs->detect(encoder, connector); 624 ret = encoder_funcs->detect(encoder, connector);
621 } 625 }
@@ -738,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
738 struct drm_mode_object *obj; 742 struct drm_mode_object *obj;
739 int i; 743 int i;
740 enum drm_connector_status ret = connector_status_disconnected; 744 enum drm_connector_status ret = connector_status_disconnected;
741 bool dret; 745 bool dret = false;
742 746
743 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 747 if (radeon_connector->ddc_bus) {
744 dret = radeon_ddc_probe(radeon_connector); 748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
745 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 749 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
746 if (dret) { 752 if (dret) {
747 if (radeon_connector->edid) { 753 if (radeon_connector->edid) {
748 kfree(radeon_connector->edid); 754 kfree(radeon_connector->edid);
@@ -898,10 +904,18 @@ static void radeon_dvi_force(struct drm_connector *connector)
898static int radeon_dvi_mode_valid(struct drm_connector *connector, 904static int radeon_dvi_mode_valid(struct drm_connector *connector,
899 struct drm_display_mode *mode) 905 struct drm_display_mode *mode)
900{ 906{
907 struct drm_device *dev = connector->dev;
908 struct radeon_device *rdev = dev->dev_private;
901 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 909 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
902 910
903 /* XXX check mode bandwidth */ 911 /* XXX check mode bandwidth */
904 912
913 /* clocks over 135 MHz have heat issues with DVI on RV100 */
914 if (radeon_connector->use_digital &&
915 (rdev->family == CHIP_RV100) &&
916 (mode->clock > 135000))
917 return MODE_CLOCK_HIGH;
918
905 if (radeon_connector->use_digital && (mode->clock > 165000)) { 919 if (radeon_connector->use_digital && (mode->clock > 165000)) {
906 if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || 920 if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
907 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || 921 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
@@ -967,7 +981,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
967 } 981 }
968 982
969 sink_type = radeon_dp_getsinktype(radeon_connector); 983 sink_type = radeon_dp_getsinktype(radeon_connector);
970 if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 984 if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
985 (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
971 if (radeon_dp_getdpcd(radeon_connector)) { 986 if (radeon_dp_getdpcd(radeon_connector)) {
972 radeon_dig_connector->dp_sink_type = sink_type; 987 radeon_dig_connector->dp_sink_type = sink_type;
973 ret = connector_status_connected; 988 ret = connector_status_connected;
@@ -992,7 +1007,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
992 1007
993 /* XXX check mode bandwidth */ 1008 /* XXX check mode bandwidth */
994 1009
995 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 1010 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
1011 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
996 return radeon_dp_mode_valid_helper(radeon_connector, mode); 1012 return radeon_dp_mode_valid_helper(radeon_connector, mode);
997 else 1013 else
998 return MODE_OK; 1014 return MODE_OK;
@@ -1145,6 +1161,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1145 subpixel_order = SubPixelHorizontalRGB; 1161 subpixel_order = SubPixelHorizontalRGB;
1146 break; 1162 break;
1147 case DRM_MODE_CONNECTOR_DisplayPort: 1163 case DRM_MODE_CONNECTOR_DisplayPort:
1164 case DRM_MODE_CONNECTOR_eDP:
1148 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1165 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1149 if (!radeon_dig_connector) 1166 if (!radeon_dig_connector)
1150 goto failed; 1167 goto failed;
@@ -1157,10 +1174,16 @@ radeon_add_atom_connector(struct drm_device *dev,
1157 goto failed; 1174 goto failed;
1158 if (i2c_bus->valid) { 1175 if (i2c_bus->valid) {
1159 /* add DP i2c bus */ 1176 /* add DP i2c bus */
1160 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); 1177 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1178 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
1179 else
1180 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
1161 if (!radeon_dig_connector->dp_i2c_bus) 1181 if (!radeon_dig_connector->dp_i2c_bus)
1162 goto failed; 1182 goto failed;
1163 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); 1183 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1184 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
1185 else
1186 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
1164 if (!radeon_connector->ddc_bus) 1187 if (!radeon_connector->ddc_bus)
1165 goto failed; 1188 goto failed;
1166 } 1189 }
@@ -1324,7 +1347,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1324 radeon_connector->dac_load_detect = false; 1347 radeon_connector->dac_load_detect = false;
1325 drm_connector_attach_property(&radeon_connector->base, 1348 drm_connector_attach_property(&radeon_connector->base,
1326 rdev->mode_info.load_detect_property, 1349 rdev->mode_info.load_detect_property,
1327 1); 1350 radeon_connector->dac_load_detect);
1328 drm_connector_attach_property(&radeon_connector->base, 1351 drm_connector_attach_property(&radeon_connector->base,
1329 rdev->mode_info.tv_std_property, 1352 rdev->mode_info.tv_std_property,
1330 radeon_combios_get_tv_info(rdev)); 1353 radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0b2f9c2ad2c1..06123ba31d31 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2145 &master_priv->sarea); 2145 &master_priv->sarea);
2146 if (ret) { 2146 if (ret) {
2147 DRM_ERROR("SAREA setup failed\n"); 2147 DRM_ERROR("SAREA setup failed\n");
2148 kfree(master_priv);
2148 return ret; 2149 return ret;
2149 } 2150 }
2150 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); 2151 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0f1d93..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
86 &p->validated); 86 &p->validated);
87 } 87 }
88 } 88 }
89 return radeon_bo_list_validate(&p->validated, p->ib->fence); 89 return radeon_bo_list_validate(&p->validated);
90} 90}
91 91
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189{ 189{
190 unsigned i; 190 unsigned i;
191 191
192 if (error) { 192 if (!error && parser->ib) {
193 radeon_bo_list_unvalidate(&parser->validated, 193 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
194 parser->ib->fence);
195 } else {
196 radeon_bo_list_unreserve(&parser->validated);
197 } 194 }
195 radeon_bo_list_unreserve(&parser->validated);
198 for (i = 0; i < parser->nrelocs; i++) { 196 for (i = 0; i < parser->nrelocs; i++) {
199 if (parser->relocs[i].gobj) { 197 if (parser->relocs[i].gobj) {
200 mutex_lock(&parser->rdev->ddev->struct_mutex); 198 mutex_lock(&parser->rdev->ddev->struct_mutex);
@@ -231,6 +229,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
231 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 229 memset(&parser, 0, sizeof(struct radeon_cs_parser));
232 parser.filp = filp; 230 parser.filp = filp;
233 parser.rdev = rdev; 231 parser.rdev = rdev;
232 parser.dev = rdev->dev;
234 r = radeon_cs_parser_init(&parser, data); 233 r = radeon_cs_parser_init(&parser, data);
235 if (r) { 234 if (r) {
236 DRM_ERROR("Failed to initialize parser !\n"); 235 DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7c6848096bcd..768b1509fa03 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev)
544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
545 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 545 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
546 } 546 }
547 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
547} 548}
548 549
549void radeon_check_arguments(struct radeon_device *rdev) 550void radeon_check_arguments(struct radeon_device *rdev)
@@ -733,16 +734,18 @@ void radeon_device_fini(struct radeon_device *rdev)
733 */ 734 */
734int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) 735int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
735{ 736{
736 struct radeon_device *rdev = dev->dev_private; 737 struct radeon_device *rdev;
737 struct drm_crtc *crtc; 738 struct drm_crtc *crtc;
738 int r; 739 int r;
739 740
740 if (dev == NULL || rdev == NULL) { 741 if (dev == NULL || dev->dev_private == NULL) {
741 return -ENODEV; 742 return -ENODEV;
742 } 743 }
743 if (state.event == PM_EVENT_PRETHAW) { 744 if (state.event == PM_EVENT_PRETHAW) {
744 return 0; 745 return 0;
745 } 746 }
747 rdev = dev->dev_private;
748
746 /* unpin the front buffers */ 749 /* unpin the front buffers */
747 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 750 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
748 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 751 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 91d72b70abc9..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
234 "INTERNAL_UNIPHY2", 234 "INTERNAL_UNIPHY2",
235}; 235};
236 236
237static const char *connector_names[13] = { 237static const char *connector_names[15] = {
238 "Unknown", 238 "Unknown",
239 "VGA", 239 "VGA",
240 "DVI-I", 240 "DVI-I",
@@ -248,6 +248,8 @@ static const char *connector_names[13] = {
248 "DisplayPort", 248 "DisplayPort",
249 "HDMI-A", 249 "HDMI-A",
250 "HDMI-B", 250 "HDMI-B",
251 "TV",
252 "eDP",
251}; 253};
252 254
253static const char *hpd_names[7] = { 255static const char *hpd_names[7] = {
@@ -276,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
276 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 278 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
277 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
278 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
279 if (radeon_connector->ddc_bus) 281 if (radeon_connector->ddc_bus) {
280 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
281 radeon_connector->ddc_bus->rec.mask_clk_reg, 283 radeon_connector->ddc_bus->rec.mask_clk_reg,
282 radeon_connector->ddc_bus->rec.mask_data_reg, 284 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -286,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
286 radeon_connector->ddc_bus->rec.en_data_reg, 288 radeon_connector->ddc_bus->rec.en_data_reg,
287 radeon_connector->ddc_bus->rec.y_clk_reg, 289 radeon_connector->ddc_bus->rec.y_clk_reg,
288 radeon_connector->ddc_bus->rec.y_data_reg); 290 radeon_connector->ddc_bus->rec.y_data_reg);
291 } else {
292 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
293 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
294 connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
295 connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
296 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
297 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
298 DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
299 }
289 DRM_INFO(" Encoders:\n"); 300 DRM_INFO(" Encoders:\n");
290 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 301 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
291 radeon_encoder = to_radeon_encoder(encoder); 302 radeon_encoder = to_radeon_encoder(encoder);
@@ -329,8 +340,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
329 ret = radeon_get_atom_connector_info_from_object_table(dev); 340 ret = radeon_get_atom_connector_info_from_object_table(dev);
330 else 341 else
331 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); 342 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
332 } else 343 } else {
333 ret = radeon_get_legacy_connector_info_from_bios(dev); 344 ret = radeon_get_legacy_connector_info_from_bios(dev);
345 if (ret == false)
346 ret = radeon_get_legacy_connector_info_from_table(dev);
347 }
334 } else { 348 } else {
335 if (!ASIC_IS_AVIVO(rdev)) 349 if (!ASIC_IS_AVIVO(rdev))
336 ret = radeon_get_legacy_connector_info_from_table(dev); 350 ret = radeon_get_legacy_connector_info_from_table(dev);
@@ -349,9 +363,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
349{ 363{
350 int ret = 0; 364 int ret = 0;
351 365
352 if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 366 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
367 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
353 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 368 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
354 if (dig->dp_i2c_bus) 369 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
370 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
355 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 371 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
356 } 372 }
357 if (!radeon_connector->ddc_bus) 373 if (!radeon_connector->ddc_bus)
@@ -404,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
404 uint32_t *fb_div_p, 420 uint32_t *fb_div_p,
405 uint32_t *frac_fb_div_p, 421 uint32_t *frac_fb_div_p,
406 uint32_t *ref_div_p, 422 uint32_t *ref_div_p,
407 uint32_t *post_div_p, 423 uint32_t *post_div_p)
408 int flags)
409{ 424{
410 uint32_t min_ref_div = pll->min_ref_div; 425 uint32_t min_ref_div = pll->min_ref_div;
411 uint32_t max_ref_div = pll->max_ref_div; 426 uint32_t max_ref_div = pll->max_ref_div;
427 uint32_t min_post_div = pll->min_post_div;
428 uint32_t max_post_div = pll->max_post_div;
412 uint32_t min_fractional_feed_div = 0; 429 uint32_t min_fractional_feed_div = 0;
413 uint32_t max_fractional_feed_div = 0; 430 uint32_t max_fractional_feed_div = 0;
414 uint32_t best_vco = pll->best_vco; 431 uint32_t best_vco = pll->best_vco;
@@ -424,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
424 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 441 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
425 freq = freq * 1000; 442 freq = freq * 1000;
426 443
427 if (flags & RADEON_PLL_USE_REF_DIV) 444 if (pll->flags & RADEON_PLL_USE_REF_DIV)
428 min_ref_div = max_ref_div = pll->reference_div; 445 min_ref_div = max_ref_div = pll->reference_div;
429 else { 446 else {
430 while (min_ref_div < max_ref_div-1) { 447 while (min_ref_div < max_ref_div-1) {
@@ -439,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
439 } 456 }
440 } 457 }
441 458
442 if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { 459 if (pll->flags & RADEON_PLL_USE_POST_DIV)
460 min_post_div = max_post_div = pll->post_div;
461
462 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
443 min_fractional_feed_div = pll->min_frac_feedback_div; 463 min_fractional_feed_div = pll->min_frac_feedback_div;
444 max_fractional_feed_div = pll->max_frac_feedback_div; 464 max_fractional_feed_div = pll->max_frac_feedback_div;
445 } 465 }
446 466
447 for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { 467 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
448 uint32_t ref_div; 468 uint32_t ref_div;
449 469
450 if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 470 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
451 continue; 471 continue;
452 472
453 /* legacy radeons only have a few post_divs */ 473 /* legacy radeons only have a few post_divs */
454 if (flags & RADEON_PLL_LEGACY) { 474 if (pll->flags & RADEON_PLL_LEGACY) {
455 if ((post_div == 5) || 475 if ((post_div == 5) ||
456 (post_div == 7) || 476 (post_div == 7) ||
457 (post_div == 9) || 477 (post_div == 9) ||
@@ -498,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
498 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 518 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
499 current_freq = radeon_div(tmp, ref_div * post_div); 519 current_freq = radeon_div(tmp, ref_div * post_div);
500 520
501 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 521 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
502 error = freq - current_freq; 522 error = freq - current_freq;
503 error = error < 0 ? 0xffffffff : error; 523 error = error < 0 ? 0xffffffff : error;
504 } else 524 } else
@@ -525,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
525 best_freq = current_freq; 545 best_freq = current_freq;
526 best_error = error; 546 best_error = error;
527 best_vco_diff = vco_diff; 547 best_vco_diff = vco_diff;
528 } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || 548 } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
529 ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || 549 ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
530 ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || 550 ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
531 ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || 551 ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
532 ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || 552 ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
533 ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { 553 ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
534 best_post_div = post_div; 554 best_post_div = post_div;
535 best_ref_div = ref_div; 555 best_ref_div = ref_div;
536 best_feedback_div = feedback_div; 556 best_feedback_div = feedback_div;
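After this refactor the flags argument is gone from both PLL helpers and radeon_compute_pll() reads pll->flags instead, so callers set the flags on the struct before asking for dividers. A hedged sketch of the new call shape (the leading freq/dot-clock parameters are assumed from context, since only the tail of the prototype appears in the hunk):

	/* flags now travel on the pll object, not as a separate parameter */
	pll->flags = RADEON_PLL_USE_REF_DIV | RADEON_PLL_PREFER_LOW_REF_DIV;
	radeon_compute_pll(pll, freq, &dot_clock,
			   &fb_div, &frac_fb_div, &ref_div, &post_div);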
@@ -566,8 +586,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
566 uint32_t *fb_div_p, 586 uint32_t *fb_div_p,
567 uint32_t *frac_fb_div_p, 587 uint32_t *frac_fb_div_p,
568 uint32_t *ref_div_p, 588 uint32_t *ref_div_p,
569 uint32_t *post_div_p, 589 uint32_t *post_div_p)
570 int flags)
571{ 590{
572 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 591 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
573 fixed20_12 pll_out_max, pll_out_min; 592 fixed20_12 pll_out_max, pll_out_min;
@@ -661,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
661 radeonfb_remove(dev, fb); 680 radeonfb_remove(dev, fb);
662 681
663 if (radeon_fb->obj) { 682 if (radeon_fb->obj) {
664 radeon_gem_object_unpin(radeon_fb->obj);
665 mutex_lock(&dev->struct_mutex); 683 mutex_lock(&dev->struct_mutex);
666 drm_gem_object_unreference(radeon_fb->obj); 684 drm_gem_object_unreference(radeon_fb->obj);
667 mutex_unlock(&dev->struct_mutex); 685 mutex_unlock(&dev->struct_mutex);
@@ -709,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
709 struct drm_gem_object *obj; 727 struct drm_gem_object *obj;
710 728
711 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 729 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
712 730 if (obj == NULL) {
731 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
732 "can't create framebuffer\n", mode_cmd->handle);
733 return NULL;
734 }
713 return radeon_framebuffer_create(dev, mode_cmd, obj); 735 return radeon_framebuffer_create(dev, mode_cmd, obj);
714} 736}
715 737
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ccba95f83d11..3c91724457ca 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
156 return ret; 156 return ret;
157} 157}
158 158
159static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
160{
161 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
162 switch (radeon_encoder->encoder_id) {
163 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
164 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
165 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
166 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
167 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
168 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
169 case ENCODER_OBJECT_ID_INTERNAL_DDI:
170 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
171 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
172 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
173 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
174 return true;
175 default:
176 return false;
177 }
178}
159void 179void
160radeon_link_encoder_connector(struct drm_device *dev) 180radeon_link_encoder_connector(struct drm_device *dev)
161{ 181{
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
202 222
203 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 223 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
204 radeon_connector = to_radeon_connector(connector); 224 radeon_connector = to_radeon_connector(connector);
205 if (radeon_encoder->devices & radeon_connector->devices) 225 if (radeon_encoder->active_device & radeon_connector->devices)
206 return connector; 226 return connector;
207 } 227 }
208 return NULL; 228 return NULL;
@@ -596,21 +616,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
596 return ATOM_ENCODER_MODE_LVDS; 616 return ATOM_ENCODER_MODE_LVDS;
597 break; 617 break;
598 case DRM_MODE_CONNECTOR_DisplayPort: 618 case DRM_MODE_CONNECTOR_DisplayPort:
619 case DRM_MODE_CONNECTOR_eDP:
599 radeon_dig_connector = radeon_connector->con_priv; 620 radeon_dig_connector = radeon_connector->con_priv;
600 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 621 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
622 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
601 return ATOM_ENCODER_MODE_DP; 623 return ATOM_ENCODER_MODE_DP;
602 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) 624 else if (drm_detect_hdmi_monitor(radeon_connector->edid))
603 return ATOM_ENCODER_MODE_HDMI; 625 return ATOM_ENCODER_MODE_HDMI;
604 else 626 else
605 return ATOM_ENCODER_MODE_DVI; 627 return ATOM_ENCODER_MODE_DVI;
606 break; 628 break;
607 case CONNECTOR_DVI_A: 629 case DRM_MODE_CONNECTOR_DVIA:
608 case CONNECTOR_VGA: 630 case DRM_MODE_CONNECTOR_VGA:
609 return ATOM_ENCODER_MODE_CRT; 631 return ATOM_ENCODER_MODE_CRT;
610 break; 632 break;
611 case CONNECTOR_STV: 633 case DRM_MODE_CONNECTOR_Composite:
612 case CONNECTOR_CTV: 634 case DRM_MODE_CONNECTOR_SVIDEO:
613 case CONNECTOR_DIN: 635 case DRM_MODE_CONNECTOR_9PinDIN:
614 /* fix me */ 636 /* fix me */
615 return ATOM_ENCODER_MODE_TV; 637 return ATOM_ENCODER_MODE_TV;
616 /*return ATOM_ENCODER_MODE_CV;*/ 638 /*return ATOM_ENCODER_MODE_CV;*/
@@ -674,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
674 696
675 memset(&args, 0, sizeof(args)); 697 memset(&args, 0, sizeof(args));
676 698
677 if (ASIC_IS_DCE32(rdev)) { 699 if (dig->dig_encoder)
678 if (dig->dig_block) 700 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
679 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 701 else
680 else 702 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
681 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 703 num = dig->dig_encoder + 1;
682 num = dig->dig_block + 1;
683 } else {
684 switch (radeon_encoder->encoder_id) {
685 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
686 /* XXX doesn't really matter which dig encoder we pick as long as it's
687 * not already in use
688 */
689 if (dig_connector->linkb)
690 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
691 else
692 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
693 num = 1;
694 break;
695 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
696 /* Only dig2 encoder can drive LVTMA */
697 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
698 num = 2;
699 break;
700 }
701 }
702 704
703 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 705 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
704 706
@@ -820,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
820 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 822 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
821 } 823 }
822 if (ASIC_IS_DCE32(rdev)) { 824 if (ASIC_IS_DCE32(rdev)) {
823 if (dig->dig_block) 825 if (dig->dig_encoder == 1)
824 args.v2.acConfig.ucEncoderSel = 1; 826 args.v2.acConfig.ucEncoderSel = 1;
825 if (dig_connector->linkb) 827 if (dig_connector->linkb)
826 args.v2.acConfig.ucLinkSel = 1; 828 args.v2.acConfig.ucLinkSel = 1;
@@ -847,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
847 args.v2.acConfig.fCoherentMode = 1; 849 args.v2.acConfig.fCoherentMode = 1;
848 } 850 }
849 } else { 851 } else {
852
850 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 853 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
851 854
855 if (dig->dig_encoder)
856 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
857 else
858 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
859
852 switch (radeon_encoder->encoder_id) { 860 switch (radeon_encoder->encoder_id) {
853 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
854 /* XXX doesn't really matter which dig encoder we pick as long as it's
855 * not already in use
856 */
857 if (dig_connector->linkb)
858 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
859 else
860 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
861 if (rdev->flags & RADEON_IS_IGP) { 862 if (rdev->flags & RADEON_IS_IGP) {
862 if (radeon_encoder->pixel_clock > 165000) { 863 if (radeon_encoder->pixel_clock > 165000) {
863 if (dig_connector->igp_lane_info & 0x3) 864 if (dig_connector->igp_lane_info & 0x3)
@@ -876,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
876 } 877 }
877 } 878 }
878 break; 879 break;
879 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
880 /* Only dig2 encoder can drive LVTMA */
881 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
882 break;
883 } 880 }
884 881
885 if (radeon_encoder->pixel_clock > 165000) 882 if (radeon_encoder->pixel_clock > 165000)
@@ -1044,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1044 union crtc_sourc_param args; 1041 union crtc_sourc_param args;
1045 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); 1042 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1046 uint8_t frev, crev; 1043 uint8_t frev, crev;
1044 struct radeon_encoder_atom_dig *dig;
1047 1045
1048 memset(&args, 0, sizeof(args)); 1046 memset(&args, 0, sizeof(args));
1049 1047
@@ -1107,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1105 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1108 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1106 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1109 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1110 if (ASIC_IS_DCE32(rdev)) { 1108 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1111 if (radeon_crtc->crtc_id) 1109 dig = radeon_encoder->enc_priv;
1112 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1110 if (dig->dig_encoder)
1113 else 1111 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1114 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1112 else
1115 } else { 1113 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1116 struct drm_connector *connector;
1117 struct radeon_connector *radeon_connector;
1118 struct radeon_connector_atom_dig *dig_connector;
1119
1120 connector = radeon_get_connector_for_encoder(encoder);
1121 if (!connector)
1122 return;
1123 radeon_connector = to_radeon_connector(connector);
1124 if (!radeon_connector->con_priv)
1125 return;
1126 dig_connector = radeon_connector->con_priv;
1127
1128 /* XXX doesn't really matter which dig encoder we pick as long as it's
1129 * not already in use
1130 */
1131 if (dig_connector->linkb)
1132 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1133 else
1134 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1135 }
1136 break; 1114 break;
1137 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1115 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1138 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1116 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
1139 break; 1117 break;
1140 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1141 /* Only dig2 encoder can drive LVTMA */
1142 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1143 break;
1144 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1118 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1145 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 1119 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1146 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; 1120 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1200,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1200 } 1174 }
1201} 1175}
1202 1176
1177static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1178{
1179 struct drm_device *dev = encoder->dev;
1180 struct radeon_device *rdev = dev->dev_private;
1181 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1182 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1183 struct drm_encoder *test_encoder;
1184 struct radeon_encoder_atom_dig *dig;
1185 uint32_t dig_enc_in_use = 0;
1186	/* on DCE32 an encoder can drive any block, so just use the crtc id */
1187 if (ASIC_IS_DCE32(rdev)) {
1188 return radeon_crtc->crtc_id;
1189 }
1190
1191 /* on DCE3 - LVTMA can only be driven by DIGB */
1192 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
1193 struct radeon_encoder *radeon_test_encoder;
1194
1195 if (encoder == test_encoder)
1196 continue;
1197
1198 if (!radeon_encoder_is_digital(test_encoder))
1199 continue;
1200
1201 radeon_test_encoder = to_radeon_encoder(test_encoder);
1202 dig = radeon_test_encoder->enc_priv;
1203
1204 if (dig->dig_encoder >= 0)
1205 dig_enc_in_use |= (1 << dig->dig_encoder);
1206 }
1207
1208 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
1209 if (dig_enc_in_use & 0x2)
1210 DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
1211 return 1;
1212 }
1213 if (!(dig_enc_in_use & 1))
1214 return 0;
1215 return 1;
1216}
1217
1203static void 1218static void
1204radeon_atom_encoder_mode_set(struct drm_encoder *encoder, 1219radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1205 struct drm_display_mode *mode, 1220 struct drm_display_mode *mode,
@@ -1212,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1212 1227
1213 if (radeon_encoder->active_device & 1228 if (radeon_encoder->active_device &
1214 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1229 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1215 if (radeon_encoder->enc_priv) { 1230 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1216 struct radeon_encoder_atom_dig *dig; 1231 if (dig)
1217 1232 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
1218 dig = radeon_encoder->enc_priv;
1219 dig->dig_block = radeon_crtc->crtc_id;
1220 }
1221 } 1233 }
1222 radeon_encoder->pixel_clock = adjusted_mode->clock; 1234 radeon_encoder->pixel_clock = adjusted_mode->clock;
1223 1235
@@ -1377,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
1377static void radeon_atom_encoder_disable(struct drm_encoder *encoder) 1389static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1378{ 1390{
1379 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1391 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1392 struct radeon_encoder_atom_dig *dig;
1380 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1393 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1394
1395 if (radeon_encoder_is_digital(encoder)) {
1396 dig = radeon_encoder->enc_priv;
1397 dig->dig_encoder = -1;
1398 }
1381 radeon_encoder->active_device = 0; 1399 radeon_encoder->active_device = 0;
1382} 1400}
1383 1401
@@ -1434,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1434 1452
1435 /* coherent mode by default */ 1453 /* coherent mode by default */
1436 dig->coherent_mode = true; 1454 dig->coherent_mode = true;
1455 dig->dig_encoder = -1;
1437 1456
1438 return dig; 1457 return dig;
1439} 1458}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
248 if (ret) 248 if (ret)
249 goto out_unref; 249 goto out_unref;
250 250
251 memset_io(fbptr, 0xff, aligned_size); 251 memset_io(fbptr, 0x0, aligned_size);
252 252
253 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
254 254
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4cdd8b4f7549..8495d4e32e18 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
140 140
141bool radeon_fence_signaled(struct radeon_fence *fence) 141bool radeon_fence_signaled(struct radeon_fence *fence)
142{ 142{
143 struct radeon_device *rdev = fence->rdev;
144 unsigned long irq_flags; 143 unsigned long irq_flags;
145 bool signaled = false; 144 bool signaled = false;
146 145
147 if (rdev->gpu_lockup) { 146 if (!fence)
148 return true; 147 return true;
149 } 148
150 if (fence == NULL) { 149 if (fence->rdev->gpu_lockup)
151 return true; 150 return true;
152 } 151
153 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); 152 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
154 signaled = fence->signaled; 153 signaled = fence->signaled;
155 /* if we are shuting down report all fence as signaled */ 154 /* if we are shuting down report all fence as signaled */
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 60df2d7e7e4c..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -131,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
131 printk(KERN_ERR "Failed to wait for object !\n"); 131 printk(KERN_ERR "Failed to wait for object !\n");
132 return r; 132 return r;
133 } 133 }
134 radeon_hdp_flush(robj->rdev);
135 } 134 }
136 return 0; 135 return 0;
137} 136}
@@ -309,10 +308,12 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
309 } 308 }
310 robj = gobj->driver_private; 309 robj = gobj->driver_private;
311 r = radeon_bo_wait(robj, NULL, false); 310 r = radeon_bo_wait(robj, NULL, false);
311 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
312 mutex_lock(&dev->struct_mutex); 314 mutex_lock(&dev->struct_mutex);
313 drm_gem_object_unreference(gobj); 315 drm_gem_object_unreference(gobj);
314 mutex_unlock(&dev->struct_mutex); 316 mutex_unlock(&dev->struct_mutex);
315 radeon_hdp_flush(robj->rdev);
316 return r; 317 return r;
317} 318}
318 319
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index b79ecc4a7cc4..2f349a300195 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
289 drm_radeon_irq_emit_t *emit = data; 289 drm_radeon_irq_emit_t *emit = data;
290 int result; 290 int result;
291 291
292 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
293 return -EINVAL;
294
295 LOCK_TEST_WITH_RETURN(dev, file_priv);
296
297 if (!dev_priv) { 292 if (!dev_priv) {
298 DRM_ERROR("called with no initialization\n"); 293 DRM_ERROR("called with no initialization\n");
299 return -EINVAL; 294 return -EINVAL;
300 } 295 }
301 296
297 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
298 return -EINVAL;
299
300 LOCK_TEST_WITH_RETURN(dev, file_priv);
301
302 result = radeon_emit_irq(dev); 302 result = radeon_emit_irq(dev);
303 303
304 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 304 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9223296fe37b..3cfd60fd0083 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -97,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
97 rdev->irq.sw_int = false; 97 rdev->irq.sw_int = false;
98 for (i = 0; i < 2; i++) { 98 for (i = 0; i < 2; i++) {
99 rdev->irq.crtc_vblank_int[i] = false; 99 rdev->irq.crtc_vblank_int[i] = false;
100 rdev->irq.hpd[i] = false;
100 } 101 }
101 radeon_irq_set(rdev); 102 radeon_irq_set(rdev);
102} 103}
@@ -128,17 +129,22 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
128 DRM_INFO("radeon: using MSI.\n"); 129 DRM_INFO("radeon: using MSI.\n");
129 } 130 }
130 } 131 }
131 drm_irq_install(rdev->ddev);
132 rdev->irq.installed = true; 132 rdev->irq.installed = true;
133 r = drm_irq_install(rdev->ddev);
134 if (r) {
135 rdev->irq.installed = false;
136 return r;
137 }
133 DRM_INFO("radeon: irq initialized.\n"); 138 DRM_INFO("radeon: irq initialized.\n");
134 return 0; 139 return 0;
135} 140}
136 141
137void radeon_irq_kms_fini(struct radeon_device *rdev) 142void radeon_irq_kms_fini(struct radeon_device *rdev)
138{ 143{
144 drm_vblank_cleanup(rdev->ddev);
139 if (rdev->irq.installed) { 145 if (rdev->irq.installed) {
140 rdev->irq.installed = false;
141 drm_irq_uninstall(rdev->ddev); 146 drm_irq_uninstall(rdev->ddev);
147 rdev->irq.installed = false;
142 if (rdev->msi_enabled) 148 if (rdev->msi_enabled)
143 pci_disable_msi(rdev->pdev); 149 pci_disable_msi(rdev->pdev);
144 } 150 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cc27485a07ad..b6d8081e1246 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
339 } 339 }
340} 340}
341 341
342/* properly set crtc bpp when using atombios */
343void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
344{
345 struct drm_device *dev = crtc->dev;
346 struct radeon_device *rdev = dev->dev_private;
347 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
348 int format;
349 uint32_t crtc_gen_cntl;
350 uint32_t disp_merge_cntl;
351 uint32_t crtc_pitch;
352
353 switch (crtc->fb->bits_per_pixel) {
354 case 8:
355 format = 2;
356 break;
357 case 15: /* 555 */
358 format = 3;
359 break;
360 case 16: /* 565 */
361 format = 4;
362 break;
363 case 24: /* RGB */
364 format = 5;
365 break;
366 case 32: /* xRGB */
367 format = 6;
368 break;
369 default:
370 return;
371 }
372
373 crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
374 ((crtc->fb->bits_per_pixel * 8) - 1)) /
375 (crtc->fb->bits_per_pixel * 8));
376 crtc_pitch |= crtc_pitch << 16;
377
378 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
379
380 switch (radeon_crtc->crtc_id) {
381 case 0:
382 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
383 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
384 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
385
386 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
387 crtc_gen_cntl |= (format << 8);
388 crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
389 WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
390 break;
391 case 1:
392 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
393 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
394 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
395
396 crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
397 crtc_gen_cntl |= (format << 8);
398 WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
399 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
400 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
401 break;
402 }
403}
404
405int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 342int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
406 struct drm_framebuffer *old_fb) 343 struct drm_framebuffer *old_fb)
407{ 344{
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
755 uint32_t post_divider = 0; 692 uint32_t post_divider = 0;
756 uint32_t freq = 0; 693 uint32_t freq = 0;
757 uint8_t pll_gain; 694 uint8_t pll_gain;
758 int pll_flags = RADEON_PLL_LEGACY;
759 bool use_bios_divs = false; 695 bool use_bios_divs = false;
760 /* PLL registers */ 696 /* PLL registers */
761 uint32_t pll_ref_div = 0; 697 uint32_t pll_ref_div = 0;
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
789 else 725 else
790 pll = &rdev->clock.p1pll; 726 pll = &rdev->clock.p1pll;
791 727
728 pll->flags = RADEON_PLL_LEGACY;
729
792 if (mode->clock > 200000) /* range limits??? */ 730 if (mode->clock > 200000) /* range limits??? */
793 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 731 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
794 else 732 else
795 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 733 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
796 734
797 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 735 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
798 if (encoder->crtc == crtc) { 736 if (encoder->crtc == crtc) {
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
804 } 742 }
805 743
806 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 744 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
807 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 745 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
808 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 746 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
809 if (!rdev->is_atom_bios) { 747 if (!rdev->is_atom_bios) {
810 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 748 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
819 } 757 }
820 } 758 }
821 } 759 }
822 pll_flags |= RADEON_PLL_USE_REF_DIV; 760 pll->flags |= RADEON_PLL_USE_REF_DIV;
823 } 761 }
824 } 762 }
825 } 763 }
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
829 if (!use_bios_divs) { 767 if (!use_bios_divs) {
830 radeon_compute_pll(pll, mode->clock, 768 radeon_compute_pll(pll, mode->clock,
831 &freq, &feedback_div, &frac_fb_div, 769 &freq, &feedback_div, &frac_fb_div,
832 &reference_div, &post_divider, 770 &reference_div, &post_divider);
833 pll_flags);
834 771
835 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 772 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
836 if (post_div->divider == post_divider) 773 if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 981508ff7037..38e45e231ef5 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -46,6 +46,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
46 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 46 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
47 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; 47 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
48 int panel_pwr_delay = 2000; 48 int panel_pwr_delay = 2000;
49 bool is_mac = false;
49 DRM_DEBUG("\n"); 50 DRM_DEBUG("\n");
50 51
51 if (radeon_encoder->enc_priv) { 52 if (radeon_encoder->enc_priv) {
@@ -58,6 +59,15 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
58 } 59 }
59 } 60 }
60 61
62 /* macs (and possibly some x86 oem systems?) wire up LVDS strangely
63 * Taken from radeonfb.
64 */
65 if ((rdev->mode_info.connector_table == CT_IBOOK) ||
66 (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
67 (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
68 (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
69 is_mac = true;
70
61 switch (mode) { 71 switch (mode) {
62 case DRM_MODE_DPMS_ON: 72 case DRM_MODE_DPMS_ON:
63 disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN); 73 disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
@@ -74,6 +84,8 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
74 84
75 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 85 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
76 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON); 86 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
87 if (is_mac)
88 lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
77 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS); 89 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
78 udelay(panel_pwr_delay * 1000); 90 udelay(panel_pwr_delay * 1000);
79 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); 91 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
@@ -85,7 +97,14 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
85 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); 97 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
86 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 98 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
87 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; 99 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
88 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); 100 if (is_mac) {
101 lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
102 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
103 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
104 } else {
105 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
106 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
107 }
89 udelay(panel_pwr_delay * 1000); 108 udelay(panel_pwr_delay * 1000);
90 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); 109 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
91 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); 110 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 3a12bb0c0563..417684daef4c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
77 unsigned pix_to_tv; 77 unsigned pix_to_tv;
78}; 78};
79 79
80static const uint16_t hor_timing_NTSC[] = { 80static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
81 0x0007, 81 0x0007,
82 0x003f, 82 0x003f,
83 0x0263, 83 0x0263,
@@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
98 0 98 0
99}; 99};
100 100
101static const uint16_t vert_timing_NTSC[] = { 101static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
102 0x2001, 102 0x2001,
103 0x200d, 103 0x200d,
104 0x1006, 104 0x1006,
@@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
115 0 115 0
116}; 116};
117 117
118static const uint16_t hor_timing_PAL[] = { 118static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
119 0x0007, 119 0x0007,
120 0x0058, 120 0x0058,
121 0x027c, 121 0x027c,
@@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
136 0 136 0
137}; 137};
138 138
139static const uint16_t vert_timing_PAL[] = { 139static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
140 0x2001, 140 0x2001,
141 0x200c, 141 0x200c,
142 0x1005, 142 0x1005,
@@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
623 } 623 }
624 flicker_removal = (tmp + 500) / 1000; 624 flicker_removal = (tmp + 500) / 1000;
625 625
626 if (flicker_removal < 3) 626 if (flicker_removal < 2)
627 flicker_removal = 3; 627 flicker_removal = 2;
628 for (i = 0; i < 6; ++i) { 628 for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
629 if (flicker_removal == SLOPE_limit[i]) 629 if (flicker_removal == SLOPE_limit[i])
630 break; 630 break;
631 } 631 }
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 402369db5ba0..e81b2aeb6a8f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,32 +46,6 @@ struct radeon_device;
46#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) 46#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
47#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) 47#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
48 48
49enum radeon_connector_type {
50 CONNECTOR_NONE,
51 CONNECTOR_VGA,
52 CONNECTOR_DVI_I,
53 CONNECTOR_DVI_D,
54 CONNECTOR_DVI_A,
55 CONNECTOR_STV,
56 CONNECTOR_CTV,
57 CONNECTOR_LVDS,
58 CONNECTOR_DIGITAL,
59 CONNECTOR_SCART,
60 CONNECTOR_HDMI_TYPE_A,
61 CONNECTOR_HDMI_TYPE_B,
62 CONNECTOR_0XC,
63 CONNECTOR_0XD,
64 CONNECTOR_DIN,
65 CONNECTOR_DISPLAY_PORT,
66 CONNECTOR_UNSUPPORTED
67};
68
69enum radeon_dvi_type {
70 DVI_AUTO,
71 DVI_DIGITAL,
72 DVI_ANALOG
73};
74
75enum radeon_rmx_type { 49enum radeon_rmx_type {
76 RMX_OFF, 50 RMX_OFF,
77 RMX_FULL, 51 RMX_FULL,
@@ -151,16 +125,24 @@ struct radeon_tmds_pll {
151#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
152#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
153#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
128#define RADEON_PLL_USE_POST_DIV (1 << 12)
154 129
155struct radeon_pll { 130struct radeon_pll {
156 uint16_t reference_freq; 131 /* reference frequency */
157 uint16_t reference_div; 132 uint32_t reference_freq;
133
134 /* fixed dividers */
135 uint32_t reference_div;
136 uint32_t post_div;
137
138 /* pll in/out limits */
158 uint32_t pll_in_min; 139 uint32_t pll_in_min;
159 uint32_t pll_in_max; 140 uint32_t pll_in_max;
160 uint32_t pll_out_min; 141 uint32_t pll_out_min;
161 uint32_t pll_out_max; 142 uint32_t pll_out_max;
162 uint16_t xclk; 143 uint32_t best_vco;
163 144
145 /* divider limits */
164 uint32_t min_ref_div; 146 uint32_t min_ref_div;
165 uint32_t max_ref_div; 147 uint32_t max_ref_div;
166 uint32_t min_post_div; 148 uint32_t min_post_div;
@@ -169,7 +151,12 @@ struct radeon_pll {
169 uint32_t max_feedback_div; 151 uint32_t max_feedback_div;
170 uint32_t min_frac_feedback_div; 152 uint32_t min_frac_feedback_div;
171 uint32_t max_frac_feedback_div; 153 uint32_t max_frac_feedback_div;
172 uint32_t best_vco; 154
155 /* flags for the current clock */
156 uint32_t flags;
157
158 /* pll id */
159 uint32_t id;
173}; 160};
174 161
175struct radeon_i2c_chan { 162struct radeon_i2c_chan {
@@ -312,7 +299,7 @@ struct radeon_atom_ss {
312struct radeon_encoder_atom_dig { 299struct radeon_encoder_atom_dig {
313 /* atom dig */ 300 /* atom dig */
314 bool coherent_mode; 301 bool coherent_mode;
315 int dig_block; 302 int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
316 /* atom lvds */ 303 /* atom lvds */
317 uint32_t lvds_misc; 304 uint32_t lvds_misc;
318 uint16_t panel_pwr_delay; 305 uint16_t panel_pwr_delay;
@@ -443,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
443 uint32_t *fb_div_p, 430 uint32_t *fb_div_p,
444 uint32_t *frac_fb_div_p, 431 uint32_t *frac_fb_div_p,
445 uint32_t *ref_div_p, 432 uint32_t *ref_div_p,
446 uint32_t *post_div_p, 433 uint32_t *post_div_p);
447 int flags);
448 434
449extern void radeon_compute_pll_avivo(struct radeon_pll *pll, 435extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
450 uint64_t freq, 436 uint64_t freq,
@@ -452,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
452 uint32_t *fb_div_p, 438 uint32_t *fb_div_p,
453 uint32_t *frac_fb_div_p, 439 uint32_t *frac_fb_div_p,
454 uint32_t *ref_div_p, 440 uint32_t *ref_div_p,
455 uint32_t *post_div_p, 441 uint32_t *post_div_p);
456 int flags);
457 442
458extern void radeon_setup_encoder_clones(struct drm_device *dev); 443extern void radeon_setup_encoder_clones(struct drm_device *dev);
459 444
@@ -479,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
479 464
480extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 465extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
481 struct drm_framebuffer *old_fb); 466 struct drm_framebuffer *old_fb);
482extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
483 467
484extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, 468extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
485 struct drm_file *file_priv, 469 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d9ffe1f56e8f..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,9 +220,11 @@ int radeon_bo_unpin(struct radeon_bo *bo)
220 220
221int radeon_bo_evict_vram(struct radeon_device *rdev) 221int radeon_bo_evict_vram(struct radeon_device *rdev)
222{ 222{
223	if (rdev->flags & RADEON_IS_IGP) {	223	/* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
224 /* Useless to evict on IGP chips */ 224 if (0 && (rdev->flags & RADEON_IS_IGP)) {
225 return 0; 225 if (rdev->mc.igp_sideport_enabled == false)
226 /* Useless to evict on IGP chips */
227 return 0;
226 } 228 }
227 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); 229 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
228} 230}
@@ -304,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
304 } 306 }
305} 307}
306 308
307int radeon_bo_list_validate(struct list_head *head, void *fence) 309int radeon_bo_list_validate(struct list_head *head)
308{ 310{
309 struct radeon_bo_list *lobj; 311 struct radeon_bo_list *lobj;
310 struct radeon_bo *bo; 312 struct radeon_bo *bo;
311 struct radeon_fence *old_fence = NULL;
312 int r; 313 int r;
313 314
314 r = radeon_bo_list_reserve(head); 315 r = radeon_bo_list_reserve(head);
@@ -332,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
332 } 333 }
333 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 334 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
334 lobj->tiling_flags = bo->tiling_flags; 335 lobj->tiling_flags = bo->tiling_flags;
335 if (fence) {
336 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
337 bo->tbo.sync_obj = radeon_fence_ref(fence);
338 bo->tbo.sync_obj_arg = NULL;
339 }
340 if (old_fence) {
341 radeon_fence_unref(&old_fence);
342 }
343 } 336 }
344 return 0; 337 return 0;
345} 338}
346 339
347void radeon_bo_list_unvalidate(struct list_head *head, void *fence) 340void radeon_bo_list_fence(struct list_head *head, void *fence)
348{ 341{
349 struct radeon_bo_list *lobj; 342 struct radeon_bo_list *lobj;
350 struct radeon_fence *old_fence; 343 struct radeon_bo *bo;
351 344 struct radeon_fence *old_fence = NULL;
352 if (fence) 345
353 list_for_each_entry(lobj, head, list) { 346 list_for_each_entry(lobj, head, list) {
354 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); 347 bo = lobj->bo;
355 if (old_fence == fence) { 348 spin_lock(&bo->tbo.lock);
356 lobj->bo->tbo.sync_obj = NULL; 349 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
357 radeon_fence_unref(&old_fence); 350 bo->tbo.sync_obj = radeon_fence_ref(fence);
358 } 351 bo->tbo.sync_obj_arg = NULL;
352 spin_unlock(&bo->tbo.lock);
353 if (old_fence) {
354 radeon_fence_unref(&old_fence);
359 } 355 }
360 radeon_bo_list_unreserve(head); 356 }
361} 357}
362 358
363int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 359int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
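With this change validation and fencing are decoupled: radeon_bo_list_validate() now only reserves and places the buffers, and the new radeon_bo_list_fence() attaches the command-stream fence to every buffer after emission, releasing whatever fence each buffer held before. A rough userspace model of that fence hand-over is sketched below; struct bo, struct fence and the fence_ref()/fence_unref() helpers are hypothetical stand-ins for the driver's refcounted radeon_fence and TTM sync_obj fields.

#include <stdio.h>
#include <stdlib.h>

struct fence { int refs; };

static struct fence *fence_ref(struct fence *f) { f->refs++; return f; }

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refs == 0)
		free(*f);
	*f = NULL;
}

struct bo { struct fence *sync_obj; };

/* attach one fence to every buffer in the list, dropping the old one */
static void bo_list_fence(struct bo *bos, int n, struct fence *fence)
{
	int i;

	for (i = 0; i < n; i++) {
		struct fence *old = bos[i].sync_obj;	/* may be NULL */

		bos[i].sync_obj = fence_ref(fence);
		if (old)
			fence_unref(&old);
	}
}

int main(void)
{
	struct bo bos[2] = { { NULL }, { NULL } };
	struct fence *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	f->refs = 1;				/* creator's reference */
	bo_list_fence(bos, 2, f);
	printf("fence refcount after fencing 2 buffers: %d\n", f->refs);
	fence_unref(&f);			/* drop the creator's reference */
	return 0;
}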
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
156 struct list_head *head); 156 struct list_head *head);
157extern int radeon_bo_list_reserve(struct list_head *head); 157extern int radeon_bo_list_reserve(struct list_head *head);
158extern void radeon_bo_list_unreserve(struct list_head *head); 158extern void radeon_bo_list_unreserve(struct list_head *head);
159extern int radeon_bo_list_validate(struct list_head *head, void *fence); 159extern int radeon_bo_list_validate(struct list_head *head);
160extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); 160extern void radeon_bo_list_fence(struct list_head *head, void *fence);
161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
162 struct vm_area_struct *vma); 162 struct vm_area_struct *vma);
163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..694799f6fac1 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
41{ 41{
42 struct radeon_fence *fence; 42 struct radeon_fence *fence;
43 struct radeon_ib *nib; 43 struct radeon_ib *nib;
44 unsigned long i; 44 int r = 0, i, c;
45 int r = 0;
46 45
47 *ib = NULL; 46 *ib = NULL;
48 r = radeon_fence_create(rdev, &fence); 47 r = radeon_fence_create(rdev, &fence);
49 if (r) { 48 if (r) {
50 DRM_ERROR("failed to create fence for new IB\n"); 49 dev_err(rdev->dev, "failed to create fence for new IB\n");
51 return r; 50 return r;
52 } 51 }
53 mutex_lock(&rdev->ib_pool.mutex); 52 mutex_lock(&rdev->ib_pool.mutex);
54 i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 53 for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
55 if (i < RADEON_IB_POOL_SIZE) { 54 i &= (RADEON_IB_POOL_SIZE - 1);
56 set_bit(i, rdev->ib_pool.alloc_bm); 55 if (rdev->ib_pool.ibs[i].free) {
57 rdev->ib_pool.ibs[i].length_dw = 0; 56 nib = &rdev->ib_pool.ibs[i];
58 *ib = &rdev->ib_pool.ibs[i]; 57 break;
59 mutex_unlock(&rdev->ib_pool.mutex); 58 }
60 goto out;
61 } 59 }
62 if (list_empty(&rdev->ib_pool.scheduled_ibs)) { 60 if (nib == NULL) {
63 /* we go do nothings here */ 61 /* This should never happen, it means we allocated all
62 * IB and haven't scheduled one yet, return EBUSY to
63 * userspace hoping that on ioctl recall we get better
64 * luck
65 */
66 dev_err(rdev->dev, "no free indirect buffer !\n");
64 mutex_unlock(&rdev->ib_pool.mutex); 67 mutex_unlock(&rdev->ib_pool.mutex);
65 DRM_ERROR("all IB allocated none scheduled.\n"); 68 radeon_fence_unref(&fence);
66 r = -EINVAL; 69 return -EBUSY;
67 goto out;
68 } 70 }
69 /* get the first ib on the scheduled list */ 71 rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
70 nib = list_entry(rdev->ib_pool.scheduled_ibs.next, 72 nib->free = false;
71 struct radeon_ib, list); 73 if (nib->fence) {
72 if (nib->fence == NULL) {
73 /* we go do nothings here */
74 mutex_unlock(&rdev->ib_pool.mutex); 74 mutex_unlock(&rdev->ib_pool.mutex);
75 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); 75 r = radeon_fence_wait(nib->fence, false);
76 r = -EINVAL; 76 if (r) {
77 goto out; 77 dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
78 } 78 nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
79 mutex_unlock(&rdev->ib_pool.mutex); 79 mutex_lock(&rdev->ib_pool.mutex);
80 80 nib->free = true;
81 r = radeon_fence_wait(nib->fence, false); 81 mutex_unlock(&rdev->ib_pool.mutex);
82 if (r) { 82 radeon_fence_unref(&fence);
83 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, 83 return r;
84 (unsigned long)nib->gpu_addr, nib->length_dw); 84 }
85 DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); 85 mutex_lock(&rdev->ib_pool.mutex);
86 goto out;
87 } 86 }
88 radeon_fence_unref(&nib->fence); 87 radeon_fence_unref(&nib->fence);
89 88 nib->fence = fence;
90 nib->length_dw = 0; 89 nib->length_dw = 0;
91
92 /* scheduled list is accessed here */
93 mutex_lock(&rdev->ib_pool.mutex);
94 list_del(&nib->list);
95 INIT_LIST_HEAD(&nib->list);
96 mutex_unlock(&rdev->ib_pool.mutex); 90 mutex_unlock(&rdev->ib_pool.mutex);
97
98 *ib = nib; 91 *ib = nib;
99out: 92 return 0;
100 if (r) {
101 radeon_fence_unref(&fence);
102 } else {
103 (*ib)->fence = fence;
104 }
105 return r;
106} 93}
107 94
108void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) 95void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
114 return; 101 return;
115 } 102 }
116 mutex_lock(&rdev->ib_pool.mutex); 103 mutex_lock(&rdev->ib_pool.mutex);
117 if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) { 104 tmp->free = true;
118 /* IB is scheduled & not signaled don't do anythings */
119 mutex_unlock(&rdev->ib_pool.mutex);
120 return;
121 }
122 list_del(&tmp->list);
123 INIT_LIST_HEAD(&tmp->list);
124 if (tmp->fence)
125 radeon_fence_unref(&tmp->fence);
126
127 tmp->length_dw = 0;
128 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
129 mutex_unlock(&rdev->ib_pool.mutex); 105 mutex_unlock(&rdev->ib_pool.mutex);
130} 106}
131 107
@@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
135 111
136 if (!ib->length_dw || !rdev->cp.ready) { 112 if (!ib->length_dw || !rdev->cp.ready) {
137 /* TODO: Nothings in the ib we should report. */ 113 /* TODO: Nothings in the ib we should report. */
138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 114 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
139 return -EINVAL; 115 return -EINVAL;
140 } 116 }
141 117
@@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
148 radeon_ring_ib_execute(rdev, ib); 124 radeon_ring_ib_execute(rdev, ib);
149 radeon_fence_emit(rdev, ib->fence); 125 radeon_fence_emit(rdev, ib->fence);
150 mutex_lock(&rdev->ib_pool.mutex); 126 mutex_lock(&rdev->ib_pool.mutex);
151	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);	127	/* once scheduled, the IB is considered free and is protected by the fence */
128 ib->free = true;
152 mutex_unlock(&rdev->ib_pool.mutex); 129 mutex_unlock(&rdev->ib_pool.mutex);
153 radeon_ring_unlock_commit(rdev); 130 radeon_ring_unlock_commit(rdev);
154 return 0; 131 return 0;
@@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
164 if (rdev->ib_pool.robj) 141 if (rdev->ib_pool.robj)
165 return 0; 142 return 0;
166 /* Allocate 1M object buffer */ 143 /* Allocate 1M object buffer */
167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
168 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 144 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
169 true, RADEON_GEM_DOMAIN_GTT, 145 true, RADEON_GEM_DOMAIN_GTT,
170 &rdev->ib_pool.robj); 146 &rdev->ib_pool.robj);
@@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
195 rdev->ib_pool.ibs[i].ptr = ptr + offset; 171 rdev->ib_pool.ibs[i].ptr = ptr + offset;
196 rdev->ib_pool.ibs[i].idx = i; 172 rdev->ib_pool.ibs[i].idx = i;
197 rdev->ib_pool.ibs[i].length_dw = 0; 173 rdev->ib_pool.ibs[i].length_dw = 0;
198 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); 174 rdev->ib_pool.ibs[i].free = true;
199 } 175 }
200 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 176 rdev->ib_pool.head_id = 0;
201 rdev->ib_pool.ready = true; 177 rdev->ib_pool.ready = true;
202 DRM_INFO("radeon: ib pool ready.\n"); 178 DRM_INFO("radeon: ib pool ready.\n");
203 if (radeon_debugfs_ib_init(rdev)) { 179 if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
214 return; 190 return;
215 } 191 }
216 mutex_lock(&rdev->ib_pool.mutex); 192 mutex_lock(&rdev->ib_pool.mutex);
217 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
218 if (rdev->ib_pool.robj) { 193 if (rdev->ib_pool.robj) {
219 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 194 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
220 if (likely(r == 0)) { 195 if (likely(r == 0)) {
@@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
363 if (ib == NULL) { 338 if (ib == NULL) {
364 return 0; 339 return 0;
365 } 340 }
366 seq_printf(m, "IB %04lu\n", ib->idx); 341 seq_printf(m, "IB %04u\n", ib->idx);
367 seq_printf(m, "IB fence %p\n", ib->fence); 342 seq_printf(m, "IB fence %p\n", ib->fence);
368 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 343 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
369 for (i = 0; i < ib->length_dw; i++) { 344 for (i = 0; i < ib->length_dw; i++) {
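The IB pool rework above drops the allocation bitmap and the scheduled_ibs list: each IB now carries a free flag, radeon_ib_get() scans the pool round-robin from head_id for a free slot (waiting on that slot's fence if one is still pending), and scheduling an IB immediately marks it free again since the attached fence now protects it. A compact model of just the round-robin scan, assuming a power-of-two pool size, is sketched below; POOL_SIZE, struct ib and ib_get() are illustrative names, not the driver's API.

#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE 16			/* power of two, like RADEON_IB_POOL_SIZE */

struct ib {
	unsigned idx;
	bool free;
};

static struct ib pool[POOL_SIZE];
static unsigned head_id;

/* scan round-robin from head_id for a free slot; NULL means all IBs busy */
static struct ib *ib_get(void)
{
	unsigned i = head_id, c;

	for (c = 0; c < POOL_SIZE; c++, i++) {
		i &= POOL_SIZE - 1;
		if (pool[i].free) {
			pool[i].free = false;
			head_id = (i + 1) & (POOL_SIZE - 1);
			return &pool[i];
		}
	}
	return NULL;	/* the driver returns -EBUSY to userspace here */
}

int main(void)
{
	struct ib *nib;
	unsigned i;

	for (i = 0; i < POOL_SIZE; i++) {
		pool[i].idx = i;
		pool[i].free = true;
	}
	nib = ib_get();
	printf("got IB %u, next head %u\n", nib->idx, head_id);
	nib = ib_get();
	printf("got IB %u, next head %u\n", nib->idx, head_id);
	return 0;
}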
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3b0c07b444a2..58b5adf974ca 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -215,7 +215,10 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
215 rbo = container_of(bo, struct radeon_bo, tbo); 215 rbo = container_of(bo, struct radeon_bo, tbo);
216 switch (bo->mem.mem_type) { 216 switch (bo->mem.mem_type) {
217 case TTM_PL_VRAM: 217 case TTM_PL_VRAM:
218 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 218 if (rbo->rdev->cp.ready == false)
219 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
220 else
221 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
219 break; 222 break;
220 case TTM_PL_TT: 223 case TTM_PL_TT:
221 default: 224 default:
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c8849a16..c29ac434ac9c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
910x22b8 SE_TCL_TEX_CYL_WRAP_CTL 910x22b8 SE_TCL_TEX_CYL_WRAP_CTL
920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL 920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
930x22c4 SE_TCL_POINT_SPRITE_CNTL 930x22c4 SE_TCL_POINT_SPRITE_CNTL
940x22d0 SE_PVS_CNTL
950x22d4 SE_PVS_CONST_CNTL
940x2648 RE_POINTSIZE 960x2648 RE_POINTSIZE
950x26c0 RE_TOP_LEFT 970x26c0 RE_TOP_LEFT
960x26c4 RE_MISC 980x26c4 RE_MISC
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 000000000000..989f7a020832
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,795 @@
1r420 0x4f60
20x1434 SRC_Y_X
30x1438 DST_Y_X
40x143C DST_HEIGHT_WIDTH
50x146C DP_GUI_MASTER_CNTL
60x1474 BRUSH_Y_X
70x1478 DP_BRUSH_BKGD_CLR
80x147C DP_BRUSH_FRGD_CLR
90x1480 BRUSH_DATA0
100x1484 BRUSH_DATA1
110x1598 DST_WIDTH_HEIGHT
120x15C0 CLR_CMP_CNTL
130x15C4 CLR_CMP_CLR_SRC
140x15C8 CLR_CMP_CLR_DST
150x15CC CLR_CMP_MSK
160x15D8 DP_SRC_FRGD_CLR
170x15DC DP_SRC_BKGD_CLR
180x1600 DST_LINE_START
190x1604 DST_LINE_END
200x1608 DST_LINE_PATCOUNT
210x16C0 DP_CNTL
220x16CC DP_WRITE_MSK
230x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
240x16E8 DEFAULT_SC_BOTTOM_RIGHT
250x16EC SC_TOP_LEFT
260x16F0 SC_BOTTOM_RIGHT
270x16F4 SRC_SC_BOTTOM_RIGHT
280x1714 DSTCACHE_CTLSTAT
290x1720 WAIT_UNTIL
300x172C RBBM_GUICNTL
310x1D98 VAP_VPORT_XSCALE
320x1D9C VAP_VPORT_XOFFSET
330x1DA0 VAP_VPORT_YSCALE
340x1DA4 VAP_VPORT_YOFFSET
350x1DA8 VAP_VPORT_ZSCALE
360x1DAC VAP_VPORT_ZOFFSET
370x2080 VAP_CNTL
380x2090 VAP_OUT_VTX_FMT_0
390x2094 VAP_OUT_VTX_FMT_1
400x20B0 VAP_VTE_CNTL
410x2138 VAP_VF_MIN_VTX_INDX
420x2140 VAP_CNTL_STATUS
430x2150 VAP_PROG_STREAM_CNTL_0
440x2154 VAP_PROG_STREAM_CNTL_1
450x2158 VAP_PROG_STREAM_CNTL_2
460x215C VAP_PROG_STREAM_CNTL_3
470x2160 VAP_PROG_STREAM_CNTL_4
480x2164 VAP_PROG_STREAM_CNTL_5
490x2168 VAP_PROG_STREAM_CNTL_6
500x216C VAP_PROG_STREAM_CNTL_7
510x2180 VAP_VTX_STATE_CNTL
520x2184 VAP_VSM_VTX_ASSM
530x2188 VAP_VTX_STATE_IND_REG_0
540x218C VAP_VTX_STATE_IND_REG_1
550x2190 VAP_VTX_STATE_IND_REG_2
560x2194 VAP_VTX_STATE_IND_REG_3
570x2198 VAP_VTX_STATE_IND_REG_4
580x219C VAP_VTX_STATE_IND_REG_5
590x21A0 VAP_VTX_STATE_IND_REG_6
600x21A4 VAP_VTX_STATE_IND_REG_7
610x21A8 VAP_VTX_STATE_IND_REG_8
620x21AC VAP_VTX_STATE_IND_REG_9
630x21B0 VAP_VTX_STATE_IND_REG_10
640x21B4 VAP_VTX_STATE_IND_REG_11
650x21B8 VAP_VTX_STATE_IND_REG_12
660x21BC VAP_VTX_STATE_IND_REG_13
670x21C0 VAP_VTX_STATE_IND_REG_14
680x21C4 VAP_VTX_STATE_IND_REG_15
690x21DC VAP_PSC_SGN_NORM_CNTL
700x21E0 VAP_PROG_STREAM_CNTL_EXT_0
710x21E4 VAP_PROG_STREAM_CNTL_EXT_1
720x21E8 VAP_PROG_STREAM_CNTL_EXT_2
730x21EC VAP_PROG_STREAM_CNTL_EXT_3
740x21F0 VAP_PROG_STREAM_CNTL_EXT_4
750x21F4 VAP_PROG_STREAM_CNTL_EXT_5
760x21F8 VAP_PROG_STREAM_CNTL_EXT_6
770x21FC VAP_PROG_STREAM_CNTL_EXT_7
780x2200 VAP_PVS_VECTOR_INDX_REG
790x2204 VAP_PVS_VECTOR_DATA_REG
800x2208 VAP_PVS_VECTOR_DATA_REG_128
810x221C VAP_CLIP_CNTL
820x2220 VAP_GB_VERT_CLIP_ADJ
830x2224 VAP_GB_VERT_DISC_ADJ
840x2228 VAP_GB_HORZ_CLIP_ADJ
850x222C VAP_GB_HORZ_DISC_ADJ
860x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
870x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
880x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
890x223C VAP_PVS_FLOW_CNTL_ADDRS_3
900x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
910x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
920x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
930x224C VAP_PVS_FLOW_CNTL_ADDRS_7
940x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
950x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
960x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
970x225C VAP_PVS_FLOW_CNTL_ADDRS_11
980x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
990x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
1000x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
1010x226C VAP_PVS_FLOW_CNTL_ADDRS_15
1020x2284 VAP_PVS_STATE_FLUSH_REG
1030x2288 VAP_PVS_VTX_TIMEOUT_REG
1040x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
1050x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
1060x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
1070x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
1080x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
1090x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
1100x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
1110x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
1120x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
1130x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
1140x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
1150x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
1160x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
1170x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
1180x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
1190x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
1200x22D0 VAP_PVS_CODE_CNTL_0
1210x22D4 VAP_PVS_CONST_CNTL
1220x22D8 VAP_PVS_CODE_CNTL_1
1230x22DC VAP_PVS_FLOW_CNTL_OPC
1240x342C RB2D_DSTCACHE_CTLSTAT
1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE
1280x401C GB_SELECT
1290x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE
1310x4100 TX_INVALTAGS
1320x4200 GA_POINT_S0
1330x4204 GA_POINT_T0
1340x4208 GA_POINT_S1
1350x420C GA_POINT_T1
1360x4214 GA_TRIANGLE_STIPPLE
1370x421C GA_POINT_SIZE
1380x4230 GA_POINT_MINMAX
1390x4234 GA_LINE_CNTL
1400x4238 GA_LINE_STIPPLE_CONFIG
1410x4260 GA_LINE_STIPPLE_VALUE
1420x4264 GA_LINE_S0
1430x4268 GA_LINE_S1
1440x4278 GA_COLOR_CONTROL
1450x427C GA_SOLID_RG
1460x4280 GA_SOLID_BA
1470x4288 GA_POLY_MODE
1480x428C GA_ROUND_MODE
1490x4290 GA_OFFSET
1500x4294 GA_FOG_SCALE
1510x4298 GA_FOG_OFFSET
1520x42A0 SU_TEX_WRAP
1530x42A4 SU_POLY_OFFSET_FRONT_SCALE
1540x42A8 SU_POLY_OFFSET_FRONT_OFFSET
1550x42AC SU_POLY_OFFSET_BACK_SCALE
1560x42B0 SU_POLY_OFFSET_BACK_OFFSET
1570x42B4 SU_POLY_OFFSET_ENABLE
1580x42B8 SU_CULL_MODE
1590x42C0 SU_DEPTH_SCALE
1600x42C4 SU_DEPTH_OFFSET
1610x42C8 SU_REG_DEST
1620x4300 RS_COUNT
1630x4304 RS_INST_COUNT
1640x4310 RS_IP_0
1650x4314 RS_IP_1
1660x4318 RS_IP_2
1670x431C RS_IP_3
1680x4320 RS_IP_4
1690x4324 RS_IP_5
1700x4328 RS_IP_6
1710x432C RS_IP_7
1720x4330 RS_INST_0
1730x4334 RS_INST_1
1740x4338 RS_INST_2
1750x433C RS_INST_3
1760x4340 RS_INST_4
1770x4344 RS_INST_5
1780x4348 RS_INST_6
1790x434C RS_INST_7
1800x4350 RS_INST_8
1810x4354 RS_INST_9
1820x4358 RS_INST_10
1830x435C RS_INST_11
1840x4360 RS_INST_12
1850x4364 RS_INST_13
1860x4368 RS_INST_14
1870x436C RS_INST_15
1880x43A4 SC_HYPERZ_EN
1890x43A8 SC_EDGERULE
1900x43B0 SC_CLIP_0_A
1910x43B4 SC_CLIP_0_B
1920x43B8 SC_CLIP_1_A
1930x43BC SC_CLIP_1_B
1940x43C0 SC_CLIP_2_A
1950x43C4 SC_CLIP_2_B
1960x43C8 SC_CLIP_3_A
1970x43CC SC_CLIP_3_B
1980x43D0 SC_CLIP_RULE
1990x43E0 SC_SCISSOR0
2000x43E8 SC_SCREENDOOR
2010x4440 TX_FILTER1_0
2020x4444 TX_FILTER1_1
2030x4448 TX_FILTER1_2
2040x444C TX_FILTER1_3
2050x4450 TX_FILTER1_4
2060x4454 TX_FILTER1_5
2070x4458 TX_FILTER1_6
2080x445C TX_FILTER1_7
2090x4460 TX_FILTER1_8
2100x4464 TX_FILTER1_9
2110x4468 TX_FILTER1_10
2120x446C TX_FILTER1_11
2130x4470 TX_FILTER1_12
2140x4474 TX_FILTER1_13
2150x4478 TX_FILTER1_14
2160x447C TX_FILTER1_15
2170x4580 TX_CHROMA_KEY_0
2180x4584 TX_CHROMA_KEY_1
2190x4588 TX_CHROMA_KEY_2
2200x458C TX_CHROMA_KEY_3
2210x4590 TX_CHROMA_KEY_4
2220x4594 TX_CHROMA_KEY_5
2230x4598 TX_CHROMA_KEY_6
2240x459C TX_CHROMA_KEY_7
2250x45A0 TX_CHROMA_KEY_8
2260x45A4 TX_CHROMA_KEY_9
2270x45A8 TX_CHROMA_KEY_10
2280x45AC TX_CHROMA_KEY_11
2290x45B0 TX_CHROMA_KEY_12
2300x45B4 TX_CHROMA_KEY_13
2310x45B8 TX_CHROMA_KEY_14
2320x45BC TX_CHROMA_KEY_15
2330x45C0 TX_BORDER_COLOR_0
2340x45C4 TX_BORDER_COLOR_1
2350x45C8 TX_BORDER_COLOR_2
2360x45CC TX_BORDER_COLOR_3
2370x45D0 TX_BORDER_COLOR_4
2380x45D4 TX_BORDER_COLOR_5
2390x45D8 TX_BORDER_COLOR_6
2400x45DC TX_BORDER_COLOR_7
2410x45E0 TX_BORDER_COLOR_8
2420x45E4 TX_BORDER_COLOR_9
2430x45E8 TX_BORDER_COLOR_10
2440x45EC TX_BORDER_COLOR_11
2450x45F0 TX_BORDER_COLOR_12
2460x45F4 TX_BORDER_COLOR_13
2470x45F8 TX_BORDER_COLOR_14
2480x45FC TX_BORDER_COLOR_15
2490x4600 US_CONFIG
2500x4604 US_PIXSIZE
2510x4608 US_CODE_OFFSET
2520x460C US_RESET
2530x4610 US_CODE_ADDR_0
2540x4614 US_CODE_ADDR_1
2550x4618 US_CODE_ADDR_2
2560x461C US_CODE_ADDR_3
2570x4620 US_TEX_INST_0
2580x4624 US_TEX_INST_1
2590x4628 US_TEX_INST_2
2600x462C US_TEX_INST_3
2610x4630 US_TEX_INST_4
2620x4634 US_TEX_INST_5
2630x4638 US_TEX_INST_6
2640x463C US_TEX_INST_7
2650x4640 US_TEX_INST_8
2660x4644 US_TEX_INST_9
2670x4648 US_TEX_INST_10
2680x464C US_TEX_INST_11
2690x4650 US_TEX_INST_12
2700x4654 US_TEX_INST_13
2710x4658 US_TEX_INST_14
2720x465C US_TEX_INST_15
2730x4660 US_TEX_INST_16
2740x4664 US_TEX_INST_17
2750x4668 US_TEX_INST_18
2760x466C US_TEX_INST_19
2770x4670 US_TEX_INST_20
2780x4674 US_TEX_INST_21
2790x4678 US_TEX_INST_22
2800x467C US_TEX_INST_23
2810x4680 US_TEX_INST_24
2820x4684 US_TEX_INST_25
2830x4688 US_TEX_INST_26
2840x468C US_TEX_INST_27
2850x4690 US_TEX_INST_28
2860x4694 US_TEX_INST_29
2870x4698 US_TEX_INST_30
2880x469C US_TEX_INST_31
2890x46A4 US_OUT_FMT_0
2900x46A8 US_OUT_FMT_1
2910x46AC US_OUT_FMT_2
2920x46B0 US_OUT_FMT_3
2930x46B4 US_W_FMT
2940x46B8 US_CODE_BANK
2950x46BC US_CODE_EXT
2960x46C0 US_ALU_RGB_ADDR_0
2970x46C4 US_ALU_RGB_ADDR_1
2980x46C8 US_ALU_RGB_ADDR_2
2990x46CC US_ALU_RGB_ADDR_3
3000x46D0 US_ALU_RGB_ADDR_4
3010x46D4 US_ALU_RGB_ADDR_5
3020x46D8 US_ALU_RGB_ADDR_6
3030x46DC US_ALU_RGB_ADDR_7
3040x46E0 US_ALU_RGB_ADDR_8
3050x46E4 US_ALU_RGB_ADDR_9
3060x46E8 US_ALU_RGB_ADDR_10
3070x46EC US_ALU_RGB_ADDR_11
3080x46F0 US_ALU_RGB_ADDR_12
3090x46F4 US_ALU_RGB_ADDR_13
3100x46F8 US_ALU_RGB_ADDR_14
3110x46FC US_ALU_RGB_ADDR_15
3120x4700 US_ALU_RGB_ADDR_16
3130x4704 US_ALU_RGB_ADDR_17
3140x4708 US_ALU_RGB_ADDR_18
3150x470C US_ALU_RGB_ADDR_19
3160x4710 US_ALU_RGB_ADDR_20
3170x4714 US_ALU_RGB_ADDR_21
3180x4718 US_ALU_RGB_ADDR_22
3190x471C US_ALU_RGB_ADDR_23
3200x4720 US_ALU_RGB_ADDR_24
3210x4724 US_ALU_RGB_ADDR_25
3220x4728 US_ALU_RGB_ADDR_26
3230x472C US_ALU_RGB_ADDR_27
3240x4730 US_ALU_RGB_ADDR_28
3250x4734 US_ALU_RGB_ADDR_29
3260x4738 US_ALU_RGB_ADDR_30
3270x473C US_ALU_RGB_ADDR_31
3280x4740 US_ALU_RGB_ADDR_32
3290x4744 US_ALU_RGB_ADDR_33
3300x4748 US_ALU_RGB_ADDR_34
3310x474C US_ALU_RGB_ADDR_35
3320x4750 US_ALU_RGB_ADDR_36
3330x4754 US_ALU_RGB_ADDR_37
3340x4758 US_ALU_RGB_ADDR_38
3350x475C US_ALU_RGB_ADDR_39
3360x4760 US_ALU_RGB_ADDR_40
3370x4764 US_ALU_RGB_ADDR_41
3380x4768 US_ALU_RGB_ADDR_42
3390x476C US_ALU_RGB_ADDR_43
3400x4770 US_ALU_RGB_ADDR_44
3410x4774 US_ALU_RGB_ADDR_45
3420x4778 US_ALU_RGB_ADDR_46
3430x477C US_ALU_RGB_ADDR_47
3440x4780 US_ALU_RGB_ADDR_48
3450x4784 US_ALU_RGB_ADDR_49
3460x4788 US_ALU_RGB_ADDR_50
3470x478C US_ALU_RGB_ADDR_51
3480x4790 US_ALU_RGB_ADDR_52
3490x4794 US_ALU_RGB_ADDR_53
3500x4798 US_ALU_RGB_ADDR_54
3510x479C US_ALU_RGB_ADDR_55
3520x47A0 US_ALU_RGB_ADDR_56
3530x47A4 US_ALU_RGB_ADDR_57
3540x47A8 US_ALU_RGB_ADDR_58
3550x47AC US_ALU_RGB_ADDR_59
3560x47B0 US_ALU_RGB_ADDR_60
3570x47B4 US_ALU_RGB_ADDR_61
3580x47B8 US_ALU_RGB_ADDR_62
3590x47BC US_ALU_RGB_ADDR_63
3600x47C0 US_ALU_ALPHA_ADDR_0
3610x47C4 US_ALU_ALPHA_ADDR_1
3620x47C8 US_ALU_ALPHA_ADDR_2
3630x47CC US_ALU_ALPHA_ADDR_3
3640x47D0 US_ALU_ALPHA_ADDR_4
3650x47D4 US_ALU_ALPHA_ADDR_5
3660x47D8 US_ALU_ALPHA_ADDR_6
3670x47DC US_ALU_ALPHA_ADDR_7
3680x47E0 US_ALU_ALPHA_ADDR_8
3690x47E4 US_ALU_ALPHA_ADDR_9
3700x47E8 US_ALU_ALPHA_ADDR_10
3710x47EC US_ALU_ALPHA_ADDR_11
3720x47F0 US_ALU_ALPHA_ADDR_12
3730x47F4 US_ALU_ALPHA_ADDR_13
3740x47F8 US_ALU_ALPHA_ADDR_14
3750x47FC US_ALU_ALPHA_ADDR_15
3760x4800 US_ALU_ALPHA_ADDR_16
3770x4804 US_ALU_ALPHA_ADDR_17
3780x4808 US_ALU_ALPHA_ADDR_18
3790x480C US_ALU_ALPHA_ADDR_19
3800x4810 US_ALU_ALPHA_ADDR_20
3810x4814 US_ALU_ALPHA_ADDR_21
3820x4818 US_ALU_ALPHA_ADDR_22
3830x481C US_ALU_ALPHA_ADDR_23
3840x4820 US_ALU_ALPHA_ADDR_24
3850x4824 US_ALU_ALPHA_ADDR_25
3860x4828 US_ALU_ALPHA_ADDR_26
3870x482C US_ALU_ALPHA_ADDR_27
3880x4830 US_ALU_ALPHA_ADDR_28
3890x4834 US_ALU_ALPHA_ADDR_29
3900x4838 US_ALU_ALPHA_ADDR_30
3910x483C US_ALU_ALPHA_ADDR_31
3920x4840 US_ALU_ALPHA_ADDR_32
3930x4844 US_ALU_ALPHA_ADDR_33
3940x4848 US_ALU_ALPHA_ADDR_34
3950x484C US_ALU_ALPHA_ADDR_35
3960x4850 US_ALU_ALPHA_ADDR_36
3970x4854 US_ALU_ALPHA_ADDR_37
3980x4858 US_ALU_ALPHA_ADDR_38
3990x485C US_ALU_ALPHA_ADDR_39
4000x4860 US_ALU_ALPHA_ADDR_40
4010x4864 US_ALU_ALPHA_ADDR_41
4020x4868 US_ALU_ALPHA_ADDR_42
4030x486C US_ALU_ALPHA_ADDR_43
4040x4870 US_ALU_ALPHA_ADDR_44
4050x4874 US_ALU_ALPHA_ADDR_45
4060x4878 US_ALU_ALPHA_ADDR_46
4070x487C US_ALU_ALPHA_ADDR_47
4080x4880 US_ALU_ALPHA_ADDR_48
4090x4884 US_ALU_ALPHA_ADDR_49
4100x4888 US_ALU_ALPHA_ADDR_50
4110x488C US_ALU_ALPHA_ADDR_51
4120x4890 US_ALU_ALPHA_ADDR_52
4130x4894 US_ALU_ALPHA_ADDR_53
4140x4898 US_ALU_ALPHA_ADDR_54
4150x489C US_ALU_ALPHA_ADDR_55
4160x48A0 US_ALU_ALPHA_ADDR_56
4170x48A4 US_ALU_ALPHA_ADDR_57
4180x48A8 US_ALU_ALPHA_ADDR_58
4190x48AC US_ALU_ALPHA_ADDR_59
4200x48B0 US_ALU_ALPHA_ADDR_60
4210x48B4 US_ALU_ALPHA_ADDR_61
4220x48B8 US_ALU_ALPHA_ADDR_62
4230x48BC US_ALU_ALPHA_ADDR_63
4240x48C0 US_ALU_RGB_INST_0
4250x48C4 US_ALU_RGB_INST_1
4260x48C8 US_ALU_RGB_INST_2
4270x48CC US_ALU_RGB_INST_3
4280x48D0 US_ALU_RGB_INST_4
4290x48D4 US_ALU_RGB_INST_5
4300x48D8 US_ALU_RGB_INST_6
4310x48DC US_ALU_RGB_INST_7
4320x48E0 US_ALU_RGB_INST_8
4330x48E4 US_ALU_RGB_INST_9
4340x48E8 US_ALU_RGB_INST_10
4350x48EC US_ALU_RGB_INST_11
4360x48F0 US_ALU_RGB_INST_12
4370x48F4 US_ALU_RGB_INST_13
4380x48F8 US_ALU_RGB_INST_14
4390x48FC US_ALU_RGB_INST_15
4400x4900 US_ALU_RGB_INST_16
4410x4904 US_ALU_RGB_INST_17
4420x4908 US_ALU_RGB_INST_18
4430x490C US_ALU_RGB_INST_19
4440x4910 US_ALU_RGB_INST_20
4450x4914 US_ALU_RGB_INST_21
4460x4918 US_ALU_RGB_INST_22
4470x491C US_ALU_RGB_INST_23
4480x4920 US_ALU_RGB_INST_24
4490x4924 US_ALU_RGB_INST_25
4500x4928 US_ALU_RGB_INST_26
4510x492C US_ALU_RGB_INST_27
4520x4930 US_ALU_RGB_INST_28
4530x4934 US_ALU_RGB_INST_29
4540x4938 US_ALU_RGB_INST_30
4550x493C US_ALU_RGB_INST_31
4560x4940 US_ALU_RGB_INST_32
4570x4944 US_ALU_RGB_INST_33
4580x4948 US_ALU_RGB_INST_34
4590x494C US_ALU_RGB_INST_35
4600x4950 US_ALU_RGB_INST_36
4610x4954 US_ALU_RGB_INST_37
4620x4958 US_ALU_RGB_INST_38
4630x495C US_ALU_RGB_INST_39
4640x4960 US_ALU_RGB_INST_40
4650x4964 US_ALU_RGB_INST_41
4660x4968 US_ALU_RGB_INST_42
4670x496C US_ALU_RGB_INST_43
4680x4970 US_ALU_RGB_INST_44
4690x4974 US_ALU_RGB_INST_45
4700x4978 US_ALU_RGB_INST_46
4710x497C US_ALU_RGB_INST_47
4720x4980 US_ALU_RGB_INST_48
4730x4984 US_ALU_RGB_INST_49
4740x4988 US_ALU_RGB_INST_50
4750x498C US_ALU_RGB_INST_51
4760x4990 US_ALU_RGB_INST_52
4770x4994 US_ALU_RGB_INST_53
4780x4998 US_ALU_RGB_INST_54
4790x499C US_ALU_RGB_INST_55
4800x49A0 US_ALU_RGB_INST_56
4810x49A4 US_ALU_RGB_INST_57
4820x49A8 US_ALU_RGB_INST_58
4830x49AC US_ALU_RGB_INST_59
4840x49B0 US_ALU_RGB_INST_60
4850x49B4 US_ALU_RGB_INST_61
4860x49B8 US_ALU_RGB_INST_62
4870x49BC US_ALU_RGB_INST_63
4880x49C0 US_ALU_ALPHA_INST_0
4890x49C4 US_ALU_ALPHA_INST_1
4900x49C8 US_ALU_ALPHA_INST_2
4910x49CC US_ALU_ALPHA_INST_3
4920x49D0 US_ALU_ALPHA_INST_4
4930x49D4 US_ALU_ALPHA_INST_5
4940x49D8 US_ALU_ALPHA_INST_6
4950x49DC US_ALU_ALPHA_INST_7
4960x49E0 US_ALU_ALPHA_INST_8
4970x49E4 US_ALU_ALPHA_INST_9
4980x49E8 US_ALU_ALPHA_INST_10
4990x49EC US_ALU_ALPHA_INST_11
5000x49F0 US_ALU_ALPHA_INST_12
5010x49F4 US_ALU_ALPHA_INST_13
5020x49F8 US_ALU_ALPHA_INST_14
5030x49FC US_ALU_ALPHA_INST_15
5040x4A00 US_ALU_ALPHA_INST_16
5050x4A04 US_ALU_ALPHA_INST_17
5060x4A08 US_ALU_ALPHA_INST_18
5070x4A0C US_ALU_ALPHA_INST_19
5080x4A10 US_ALU_ALPHA_INST_20
5090x4A14 US_ALU_ALPHA_INST_21
5100x4A18 US_ALU_ALPHA_INST_22
5110x4A1C US_ALU_ALPHA_INST_23
5120x4A20 US_ALU_ALPHA_INST_24
5130x4A24 US_ALU_ALPHA_INST_25
5140x4A28 US_ALU_ALPHA_INST_26
5150x4A2C US_ALU_ALPHA_INST_27
5160x4A30 US_ALU_ALPHA_INST_28
5170x4A34 US_ALU_ALPHA_INST_29
5180x4A38 US_ALU_ALPHA_INST_30
5190x4A3C US_ALU_ALPHA_INST_31
5200x4A40 US_ALU_ALPHA_INST_32
5210x4A44 US_ALU_ALPHA_INST_33
5220x4A48 US_ALU_ALPHA_INST_34
5230x4A4C US_ALU_ALPHA_INST_35
5240x4A50 US_ALU_ALPHA_INST_36
5250x4A54 US_ALU_ALPHA_INST_37
5260x4A58 US_ALU_ALPHA_INST_38
5270x4A5C US_ALU_ALPHA_INST_39
5280x4A60 US_ALU_ALPHA_INST_40
5290x4A64 US_ALU_ALPHA_INST_41
5300x4A68 US_ALU_ALPHA_INST_42
5310x4A6C US_ALU_ALPHA_INST_43
5320x4A70 US_ALU_ALPHA_INST_44
5330x4A74 US_ALU_ALPHA_INST_45
5340x4A78 US_ALU_ALPHA_INST_46
5350x4A7C US_ALU_ALPHA_INST_47
5360x4A80 US_ALU_ALPHA_INST_48
5370x4A84 US_ALU_ALPHA_INST_49
5380x4A88 US_ALU_ALPHA_INST_50
5390x4A8C US_ALU_ALPHA_INST_51
5400x4A90 US_ALU_ALPHA_INST_52
5410x4A94 US_ALU_ALPHA_INST_53
5420x4A98 US_ALU_ALPHA_INST_54
5430x4A9C US_ALU_ALPHA_INST_55
5440x4AA0 US_ALU_ALPHA_INST_56
5450x4AA4 US_ALU_ALPHA_INST_57
5460x4AA8 US_ALU_ALPHA_INST_58
5470x4AAC US_ALU_ALPHA_INST_59
5480x4AB0 US_ALU_ALPHA_INST_60
5490x4AB4 US_ALU_ALPHA_INST_61
5500x4AB8 US_ALU_ALPHA_INST_62
5510x4ABC US_ALU_ALPHA_INST_63
5520x4AC0 US_ALU_EXT_ADDR_0
5530x4AC4 US_ALU_EXT_ADDR_1
5540x4AC8 US_ALU_EXT_ADDR_2
5550x4ACC US_ALU_EXT_ADDR_3
5560x4AD0 US_ALU_EXT_ADDR_4
5570x4AD4 US_ALU_EXT_ADDR_5
5580x4AD8 US_ALU_EXT_ADDR_6
5590x4ADC US_ALU_EXT_ADDR_7
5600x4AE0 US_ALU_EXT_ADDR_8
5610x4AE4 US_ALU_EXT_ADDR_9
5620x4AE8 US_ALU_EXT_ADDR_10
5630x4AEC US_ALU_EXT_ADDR_11
5640x4AF0 US_ALU_EXT_ADDR_12
5650x4AF4 US_ALU_EXT_ADDR_13
5660x4AF8 US_ALU_EXT_ADDR_14
5670x4AFC US_ALU_EXT_ADDR_15
5680x4B00 US_ALU_EXT_ADDR_16
5690x4B04 US_ALU_EXT_ADDR_17
5700x4B08 US_ALU_EXT_ADDR_18
5710x4B0C US_ALU_EXT_ADDR_19
5720x4B10 US_ALU_EXT_ADDR_20
5730x4B14 US_ALU_EXT_ADDR_21
5740x4B18 US_ALU_EXT_ADDR_22
5750x4B1C US_ALU_EXT_ADDR_23
5760x4B20 US_ALU_EXT_ADDR_24
5770x4B24 US_ALU_EXT_ADDR_25
5780x4B28 US_ALU_EXT_ADDR_26
5790x4B2C US_ALU_EXT_ADDR_27
5800x4B30 US_ALU_EXT_ADDR_28
5810x4B34 US_ALU_EXT_ADDR_29
5820x4B38 US_ALU_EXT_ADDR_30
5830x4B3C US_ALU_EXT_ADDR_31
5840x4B40 US_ALU_EXT_ADDR_32
5850x4B44 US_ALU_EXT_ADDR_33
5860x4B48 US_ALU_EXT_ADDR_34
5870x4B4C US_ALU_EXT_ADDR_35
5880x4B50 US_ALU_EXT_ADDR_36
5890x4B54 US_ALU_EXT_ADDR_37
5900x4B58 US_ALU_EXT_ADDR_38
5910x4B5C US_ALU_EXT_ADDR_39
5920x4B60 US_ALU_EXT_ADDR_40
5930x4B64 US_ALU_EXT_ADDR_41
5940x4B68 US_ALU_EXT_ADDR_42
5950x4B6C US_ALU_EXT_ADDR_43
5960x4B70 US_ALU_EXT_ADDR_44
5970x4B74 US_ALU_EXT_ADDR_45
5980x4B78 US_ALU_EXT_ADDR_46
5990x4B7C US_ALU_EXT_ADDR_47
6000x4B80 US_ALU_EXT_ADDR_48
6010x4B84 US_ALU_EXT_ADDR_49
6020x4B88 US_ALU_EXT_ADDR_50
6030x4B8C US_ALU_EXT_ADDR_51
6040x4B90 US_ALU_EXT_ADDR_52
6050x4B94 US_ALU_EXT_ADDR_53
6060x4B98 US_ALU_EXT_ADDR_54
6070x4B9C US_ALU_EXT_ADDR_55
6080x4BA0 US_ALU_EXT_ADDR_56
6090x4BA4 US_ALU_EXT_ADDR_57
6100x4BA8 US_ALU_EXT_ADDR_58
6110x4BAC US_ALU_EXT_ADDR_59
6120x4BB0 US_ALU_EXT_ADDR_60
6130x4BB4 US_ALU_EXT_ADDR_61
6140x4BB8 US_ALU_EXT_ADDR_62
6150x4BBC US_ALU_EXT_ADDR_63
6160x4BC0 FG_FOG_BLEND
6170x4BC4 FG_FOG_FACTOR
6180x4BC8 FG_FOG_COLOR_R
6190x4BCC FG_FOG_COLOR_G
6200x4BD0 FG_FOG_COLOR_B
6210x4BD4 FG_ALPHA_FUNC
6220x4BD8 FG_DEPTH_SRC
6230x4C00 US_ALU_CONST_R_0
6240x4C04 US_ALU_CONST_G_0
6250x4C08 US_ALU_CONST_B_0
6260x4C0C US_ALU_CONST_A_0
6270x4C10 US_ALU_CONST_R_1
6280x4C14 US_ALU_CONST_G_1
6290x4C18 US_ALU_CONST_B_1
6300x4C1C US_ALU_CONST_A_1
6310x4C20 US_ALU_CONST_R_2
6320x4C24 US_ALU_CONST_G_2
6330x4C28 US_ALU_CONST_B_2
6340x4C2C US_ALU_CONST_A_2
6350x4C30 US_ALU_CONST_R_3
6360x4C34 US_ALU_CONST_G_3
6370x4C38 US_ALU_CONST_B_3
6380x4C3C US_ALU_CONST_A_3
6390x4C40 US_ALU_CONST_R_4
6400x4C44 US_ALU_CONST_G_4
6410x4C48 US_ALU_CONST_B_4
6420x4C4C US_ALU_CONST_A_4
6430x4C50 US_ALU_CONST_R_5
6440x4C54 US_ALU_CONST_G_5
6450x4C58 US_ALU_CONST_B_5
6460x4C5C US_ALU_CONST_A_5
6470x4C60 US_ALU_CONST_R_6
6480x4C64 US_ALU_CONST_G_6
6490x4C68 US_ALU_CONST_B_6
6500x4C6C US_ALU_CONST_A_6
6510x4C70 US_ALU_CONST_R_7
6520x4C74 US_ALU_CONST_G_7
6530x4C78 US_ALU_CONST_B_7
6540x4C7C US_ALU_CONST_A_7
6550x4C80 US_ALU_CONST_R_8
6560x4C84 US_ALU_CONST_G_8
6570x4C88 US_ALU_CONST_B_8
6580x4C8C US_ALU_CONST_A_8
6590x4C90 US_ALU_CONST_R_9
6600x4C94 US_ALU_CONST_G_9
6610x4C98 US_ALU_CONST_B_9
6620x4C9C US_ALU_CONST_A_9
6630x4CA0 US_ALU_CONST_R_10
6640x4CA4 US_ALU_CONST_G_10
6650x4CA8 US_ALU_CONST_B_10
6660x4CAC US_ALU_CONST_A_10
6670x4CB0 US_ALU_CONST_R_11
6680x4CB4 US_ALU_CONST_G_11
6690x4CB8 US_ALU_CONST_B_11
6700x4CBC US_ALU_CONST_A_11
6710x4CC0 US_ALU_CONST_R_12
6720x4CC4 US_ALU_CONST_G_12
6730x4CC8 US_ALU_CONST_B_12
6740x4CCC US_ALU_CONST_A_12
6750x4CD0 US_ALU_CONST_R_13
6760x4CD4 US_ALU_CONST_G_13
6770x4CD8 US_ALU_CONST_B_13
6780x4CDC US_ALU_CONST_A_13
6790x4CE0 US_ALU_CONST_R_14
6800x4CE4 US_ALU_CONST_G_14
6810x4CE8 US_ALU_CONST_B_14
6820x4CEC US_ALU_CONST_A_14
6830x4CF0 US_ALU_CONST_R_15
6840x4CF4 US_ALU_CONST_G_15
6850x4CF8 US_ALU_CONST_B_15
6860x4CFC US_ALU_CONST_A_15
6870x4D00 US_ALU_CONST_R_16
6880x4D04 US_ALU_CONST_G_16
6890x4D08 US_ALU_CONST_B_16
6900x4D0C US_ALU_CONST_A_16
6910x4D10 US_ALU_CONST_R_17
6920x4D14 US_ALU_CONST_G_17
6930x4D18 US_ALU_CONST_B_17
6940x4D1C US_ALU_CONST_A_17
6950x4D20 US_ALU_CONST_R_18
6960x4D24 US_ALU_CONST_G_18
6970x4D28 US_ALU_CONST_B_18
6980x4D2C US_ALU_CONST_A_18
6990x4D30 US_ALU_CONST_R_19
7000x4D34 US_ALU_CONST_G_19
7010x4D38 US_ALU_CONST_B_19
7020x4D3C US_ALU_CONST_A_19
7030x4D40 US_ALU_CONST_R_20
7040x4D44 US_ALU_CONST_G_20
7050x4D48 US_ALU_CONST_B_20
7060x4D4C US_ALU_CONST_A_20
7070x4D50 US_ALU_CONST_R_21
7080x4D54 US_ALU_CONST_G_21
7090x4D58 US_ALU_CONST_B_21
7100x4D5C US_ALU_CONST_A_21
7110x4D60 US_ALU_CONST_R_22
7120x4D64 US_ALU_CONST_G_22
7130x4D68 US_ALU_CONST_B_22
7140x4D6C US_ALU_CONST_A_22
7150x4D70 US_ALU_CONST_R_23
7160x4D74 US_ALU_CONST_G_23
7170x4D78 US_ALU_CONST_B_23
7180x4D7C US_ALU_CONST_A_23
7190x4D80 US_ALU_CONST_R_24
7200x4D84 US_ALU_CONST_G_24
7210x4D88 US_ALU_CONST_B_24
7220x4D8C US_ALU_CONST_A_24
7230x4D90 US_ALU_CONST_R_25
7240x4D94 US_ALU_CONST_G_25
7250x4D98 US_ALU_CONST_B_25
7260x4D9C US_ALU_CONST_A_25
7270x4DA0 US_ALU_CONST_R_26
7280x4DA4 US_ALU_CONST_G_26
7290x4DA8 US_ALU_CONST_B_26
7300x4DAC US_ALU_CONST_A_26
7310x4DB0 US_ALU_CONST_R_27
7320x4DB4 US_ALU_CONST_G_27
7330x4DB8 US_ALU_CONST_B_27
7340x4DBC US_ALU_CONST_A_27
7350x4DC0 US_ALU_CONST_R_28
7360x4DC4 US_ALU_CONST_G_28
7370x4DC8 US_ALU_CONST_B_28
7380x4DCC US_ALU_CONST_A_28
7390x4DD0 US_ALU_CONST_R_29
7400x4DD4 US_ALU_CONST_G_29
7410x4DD8 US_ALU_CONST_B_29
7420x4DDC US_ALU_CONST_A_29
7430x4DE0 US_ALU_CONST_R_30
7440x4DE4 US_ALU_CONST_G_30
7450x4DE8 US_ALU_CONST_B_30
7460x4DEC US_ALU_CONST_A_30
7470x4DF0 US_ALU_CONST_R_31
7480x4DF4 US_ALU_CONST_G_31
7490x4DF8 US_ALU_CONST_B_31
7500x4DFC US_ALU_CONST_A_31
7510x4E04 RB3D_BLENDCNTL_R3
7520x4E08 RB3D_ABLENDCNTL_R3
7530x4E0C RB3D_COLOR_CHANNEL_MASK
7540x4E10 RB3D_CONSTANT_COLOR
7550x4E14 RB3D_COLOR_CLEAR_VALUE
7560x4E18 RB3D_ROPCNTL_R3
7570x4E1C RB3D_CLRCMP_FLIPE_R3
7580x4E20 RB3D_CLRCMP_CLR_R3
7590x4E24 RB3D_CLRCMP_MSK_R3
7600x4E48 RB3D_DEBUG_CTL
7610x4E4C RB3D_DSTCACHE_CTLSTAT_R3
7620x4E50 RB3D_DITHER_CTL
7630x4E54 RB3D_CMASK_OFFSET0
7640x4E58 RB3D_CMASK_OFFSET1
7650x4E5C RB3D_CMASK_OFFSET2
7660x4E60 RB3D_CMASK_OFFSET3
7670x4E64 RB3D_CMASK_PITCH0
7680x4E68 RB3D_CMASK_PITCH1
7690x4E6C RB3D_CMASK_PITCH2
7700x4E70 RB3D_CMASK_PITCH3
7710x4E74 RB3D_CMASK_WRINDEX
7720x4E78 RB3D_CMASK_DWORD
7730x4E7C RB3D_CMASK_RDINDEX
7740x4E80 RB3D_AARESOLVE_OFFSET
7750x4E84 RB3D_AARESOLVE_PITCH
7760x4E88 RB3D_AARESOLVE_CTL
7770x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7780x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7790x4F04 ZB_ZSTENCILCNTL
7800x4F08 ZB_STENCILREFMASK
7810x4F14 ZB_ZTOP
7820x4F18 ZB_ZCACHE_CTLSTAT
7830x4F1C ZB_BW_CNTL
7840x4F28 ZB_DEPTHCLEARVALUE
7850x4F30 ZB_ZMASK_OFFSET
7860x4F34 ZB_ZMASK_PITCH
7870x4F38 ZB_ZMASK_WRINDEX
7880x4F3C ZB_ZMASK_DWORD
7890x4F40 ZB_ZMASK_RDINDEX
7900x4F44 ZB_HIZ_OFFSET
7910x4F48 ZB_HIZ_WRINDEX
7920x4F4C ZB_HIZ_DWORD
7930x4F50 ZB_HIZ_RDINDEX
7940x4F54 ZB_HIZ_PITCH
7950x4F58 ZB_ZPASS_DATA
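
The register tables above, like the reg_srcs updates in the hunks below, enumerate command-stream offsets the radeon checker is prepared to let user space write. As a rough illustration only, a sorted table of such offsets can be probed with a plain binary search; the names safe_regs and is_safe_reg below are invented for this sketch and are not the driver's actual mechanism, which generates its lookup tables from these files at build time.

/* Hypothetical sketch: membership test against a sorted table of
 * register offsets considered writable from user command streams.
 * Table contents and names are illustrative only. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static const unsigned int safe_regs[] = {
	0x4600, /* US_CONFIG */
	0x4604, /* US_PIXSIZE */
	0x46B8, /* US_CODE_BANK */
	0x4BC0, /* FG_FOG_BLEND */
	0x4F58, /* ZB_ZPASS_DATA */
};

static bool is_safe_reg(unsigned int reg)
{
	size_t lo = 0, hi = sizeof(safe_regs) / sizeof(safe_regs[0]);

	while (lo < hi) {			/* classic binary search */
		size_t mid = lo + (hi - lo) / 2;
		if (safe_regs[mid] == reg)
			return true;
		if (safe_regs[mid] < reg)
			lo = mid + 1;
		else
			hi = mid;
	}
	return false;
}

int main(void)
{
	printf("0x46B8 safe? %d\n", is_safe_reg(0x46B8));	/* 1 */
	printf("0x46A0 safe? %d\n", is_safe_reg(0x46A0));	/* 0 */
	return 0;
}
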
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 8e3c0b807add..6801b865d1c4 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -153,7 +153,7 @@ rs600 0x6d40
1530x42A4 SU_POLY_OFFSET_FRONT_SCALE 1530x42A4 SU_POLY_OFFSET_FRONT_SCALE
1540x42A8 SU_POLY_OFFSET_FRONT_OFFSET 1540x42A8 SU_POLY_OFFSET_FRONT_OFFSET
1550x42AC SU_POLY_OFFSET_BACK_SCALE 1550x42AC SU_POLY_OFFSET_BACK_SCALE
1560x42B0 SU_POLY_OFFSET_BACK_OFFSET 1560x42B0 SU_POLY_OFFSET_BACK_OFFSET
1570x42B4 SU_POLY_OFFSET_ENABLE 1570x42B4 SU_POLY_OFFSET_ENABLE
1580x42B8 SU_CULL_MODE 1580x42B8 SU_CULL_MODE
1590x42C0 SU_DEPTH_SCALE 1590x42C0 SU_DEPTH_SCALE
@@ -291,6 +291,8 @@ rs600 0x6d40
2910x46AC US_OUT_FMT_2 2910x46AC US_OUT_FMT_2
2920x46B0 US_OUT_FMT_3 2920x46B0 US_OUT_FMT_3
2930x46B4 US_W_FMT 2930x46B4 US_W_FMT
2940x46B8 US_CODE_BANK
2950x46BC US_CODE_EXT
2940x46C0 US_ALU_RGB_ADDR_0 2960x46C0 US_ALU_RGB_ADDR_0
2950x46C4 US_ALU_RGB_ADDR_1 2970x46C4 US_ALU_RGB_ADDR_1
2960x46C8 US_ALU_RGB_ADDR_2 2980x46C8 US_ALU_RGB_ADDR_2
@@ -547,6 +549,70 @@ rs600 0x6d40
5470x4AB4 US_ALU_ALPHA_INST_61 5490x4AB4 US_ALU_ALPHA_INST_61
5480x4AB8 US_ALU_ALPHA_INST_62 5500x4AB8 US_ALU_ALPHA_INST_62
5490x4ABC US_ALU_ALPHA_INST_63 5510x4ABC US_ALU_ALPHA_INST_63
5520x4AC0 US_ALU_EXT_ADDR_0
5530x4AC4 US_ALU_EXT_ADDR_1
5540x4AC8 US_ALU_EXT_ADDR_2
5550x4ACC US_ALU_EXT_ADDR_3
5560x4AD0 US_ALU_EXT_ADDR_4
5570x4AD4 US_ALU_EXT_ADDR_5
5580x4AD8 US_ALU_EXT_ADDR_6
5590x4ADC US_ALU_EXT_ADDR_7
5600x4AE0 US_ALU_EXT_ADDR_8
5610x4AE4 US_ALU_EXT_ADDR_9
5620x4AE8 US_ALU_EXT_ADDR_10
5630x4AEC US_ALU_EXT_ADDR_11
5640x4AF0 US_ALU_EXT_ADDR_12
5650x4AF4 US_ALU_EXT_ADDR_13
5660x4AF8 US_ALU_EXT_ADDR_14
5670x4AFC US_ALU_EXT_ADDR_15
5680x4B00 US_ALU_EXT_ADDR_16
5690x4B04 US_ALU_EXT_ADDR_17
5700x4B08 US_ALU_EXT_ADDR_18
5710x4B0C US_ALU_EXT_ADDR_19
5720x4B10 US_ALU_EXT_ADDR_20
5730x4B14 US_ALU_EXT_ADDR_21
5740x4B18 US_ALU_EXT_ADDR_22
5750x4B1C US_ALU_EXT_ADDR_23
5760x4B20 US_ALU_EXT_ADDR_24
5770x4B24 US_ALU_EXT_ADDR_25
5780x4B28 US_ALU_EXT_ADDR_26
5790x4B2C US_ALU_EXT_ADDR_27
5800x4B30 US_ALU_EXT_ADDR_28
5810x4B34 US_ALU_EXT_ADDR_29
5820x4B38 US_ALU_EXT_ADDR_30
5830x4B3C US_ALU_EXT_ADDR_31
5840x4B40 US_ALU_EXT_ADDR_32
5850x4B44 US_ALU_EXT_ADDR_33
5860x4B48 US_ALU_EXT_ADDR_34
5870x4B4C US_ALU_EXT_ADDR_35
5880x4B50 US_ALU_EXT_ADDR_36
5890x4B54 US_ALU_EXT_ADDR_37
5900x4B58 US_ALU_EXT_ADDR_38
5910x4B5C US_ALU_EXT_ADDR_39
5920x4B60 US_ALU_EXT_ADDR_40
5930x4B64 US_ALU_EXT_ADDR_41
5940x4B68 US_ALU_EXT_ADDR_42
5950x4B6C US_ALU_EXT_ADDR_43
5960x4B70 US_ALU_EXT_ADDR_44
5970x4B74 US_ALU_EXT_ADDR_45
5980x4B78 US_ALU_EXT_ADDR_46
5990x4B7C US_ALU_EXT_ADDR_47
6000x4B80 US_ALU_EXT_ADDR_48
6010x4B84 US_ALU_EXT_ADDR_49
6020x4B88 US_ALU_EXT_ADDR_50
6030x4B8C US_ALU_EXT_ADDR_51
6040x4B90 US_ALU_EXT_ADDR_52
6050x4B94 US_ALU_EXT_ADDR_53
6060x4B98 US_ALU_EXT_ADDR_54
6070x4B9C US_ALU_EXT_ADDR_55
6080x4BA0 US_ALU_EXT_ADDR_56
6090x4BA4 US_ALU_EXT_ADDR_57
6100x4BA8 US_ALU_EXT_ADDR_58
6110x4BAC US_ALU_EXT_ADDR_59
6120x4BB0 US_ALU_EXT_ADDR_60
6130x4BB4 US_ALU_EXT_ADDR_61
6140x4BB8 US_ALU_EXT_ADDR_62
6150x4BBC US_ALU_EXT_ADDR_63
5500x4BC0 FG_FOG_BLEND 6160x4BC0 FG_FOG_BLEND
5510x4BC4 FG_FOG_FACTOR 6170x4BC4 FG_FOG_FACTOR
5520x4BC8 FG_FOG_COLOR_R 6180x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 0102a0d5735c..38abf63bf2cd 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -161,7 +161,12 @@ rv515 0x6d40
1610x401C GB_SELECT 1610x401C GB_SELECT
1620x4020 GB_AA_CONFIG 1620x4020 GB_AA_CONFIG
1630x4024 GB_FIFO_SIZE 1630x4024 GB_FIFO_SIZE
1640x4028 GB_Z_PEQ_CONFIG
1640x4100 TX_INVALTAGS 1650x4100 TX_INVALTAGS
1660x4114 SU_TEX_WRAP_PS3
1670x4118 PS3_ENABLE
1680x411c PS3_VTX_FMT
1690x4120 PS3_TEX_SOURCE
1650x4200 GA_POINT_S0 1700x4200 GA_POINT_S0
1660x4204 GA_POINT_T0 1710x4204 GA_POINT_T0
1670x4208 GA_POINT_S1 1720x4208 GA_POINT_S1
@@ -171,6 +176,7 @@ rv515 0x6d40
1710x4230 GA_POINT_MINMAX 1760x4230 GA_POINT_MINMAX
1720x4234 GA_LINE_CNTL 1770x4234 GA_LINE_CNTL
1730x4238 GA_LINE_STIPPLE_CONFIG 1780x4238 GA_LINE_STIPPLE_CONFIG
1790x4258 GA_COLOR_CONTROL_PS3
1740x4260 GA_LINE_STIPPLE_VALUE 1800x4260 GA_LINE_STIPPLE_VALUE
1750x4264 GA_LINE_S0 1810x4264 GA_LINE_S0
1760x4268 GA_LINE_S1 1820x4268 GA_LINE_S1
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 368415df5f3a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
223 return 0; 223 return 0;
224} 224}
225 225
226int rs400_mc_wait_for_idle(struct radeon_device *rdev)
227{
228 unsigned i;
229 uint32_t tmp;
230
231 for (i = 0; i < rdev->usec_timeout; i++) {
232 /* read MC_STATUS */
233 tmp = RREG32(0x0150);
234 if (tmp & (1 << 2)) {
235 return 0;
236 }
237 DRM_UDELAY(1);
238 }
239 return -1;
240}
241
226void rs400_gpu_init(struct radeon_device *rdev) 242void rs400_gpu_init(struct radeon_device *rdev)
227{ 243{
228 /* FIXME: HDP same place on rs400 ? */ 244 /* FIXME: HDP same place on rs400 ? */
229 r100_hdp_reset(rdev); 245 r100_hdp_reset(rdev);
230 /* FIXME: is this correct ? */ 246 /* FIXME: is this correct ? */
231 r420_pipes_init(rdev); 247 r420_pipes_init(rdev);
232 if (r300_mc_wait_for_idle(rdev)) { 248 if (rs400_mc_wait_for_idle(rdev)) {
233 printk(KERN_WARNING "Failed to wait MC idle while " 249 printk(KERN_WARNING "rs400: Failed to wait MC idle while "
234 "programming pipes. Bad things might happen.\n"); 250 "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
235 } 251 }
236} 252}
237 253
@@ -356,6 +372,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
356 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; 372 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
357 rdev->mc.gtt_location = 0xFFFFFFFFUL; 373 rdev->mc.gtt_location = 0xFFFFFFFFUL;
358 r = radeon_mc_setup(rdev); 374 r = radeon_mc_setup(rdev);
375 rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
359 if (r) 376 if (r)
360 return r; 377 return r;
361 return 0; 378 return 0;
@@ -369,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
369 r100_mc_stop(rdev, &save); 386 r100_mc_stop(rdev, &save);
370 387
371 /* Wait for mc idle */ 388 /* Wait for mc idle */
372 if (r300_mc_wait_for_idle(rdev)) 389 if (rs400_mc_wait_for_idle(rdev))
373 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 390 dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
374 WREG32(R_000148_MC_FB_LOCATION, 391 WREG32(R_000148_MC_FB_LOCATION,
375 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 392 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
376 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 393 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -395,6 +412,7 @@ static int rs400_startup(struct radeon_device *rdev)
395 return r; 412 return r;
396 /* Enable IRQ */ 413 /* Enable IRQ */
397 r100_irq_set(rdev); 414 r100_irq_set(rdev);
415 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
398 /* 1M ring buffer */ 416 /* 1M ring buffer */
399 r = r100_cp_init(rdev, 1024 * 1024); 417 r = r100_cp_init(rdev, 1024 * 1024);
400 if (r) { 418 if (r) {
@@ -446,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
446 464
447void rs400_fini(struct radeon_device *rdev) 465void rs400_fini(struct radeon_device *rdev)
448{ 466{
449 rs400_suspend(rdev);
450 r100_cp_fini(rdev); 467 r100_cp_fini(rdev);
451 r100_wb_fini(rdev); 468 r100_wb_fini(rdev);
452 r100_ib_fini(rdev); 469 r100_ib_fini(rdev);
@@ -525,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
525 if (r) { 542 if (r) {
526 /* Somethings want wront with the accel init stop accel */ 543 /* Somethings want wront with the accel init stop accel */
527 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 544 dev_err(rdev->dev, "Disabling GPU acceleration\n");
528 rs400_suspend(rdev);
529 r100_cp_fini(rdev); 545 r100_cp_fini(rdev);
530 r100_wb_fini(rdev); 546 r100_wb_fini(rdev);
531 r100_ib_fini(rdev); 547 r100_ib_fini(rdev);
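
The new rs400_mc_wait_for_idle() above polls MC_STATUS (0x0150) for the idle bit in a bounded loop rather than reusing the r300 helper, and the callers now only warn when the wait times out. A minimal user-space sketch of the same poll-with-timeout shape follows; read_status() and IDLE_BIT stand in for the real MMIO read and the MC idle bit, and usleep() stands in for DRM_UDELAY.

/* Minimal sketch: poll a status bit until idle or timeout. */
#include <stdio.h>
#include <unistd.h>

#define IDLE_BIT (1u << 2)

static unsigned int read_status(void)
{
	static int calls;
	return ++calls > 3 ? IDLE_BIT : 0;	/* pretend idle after a few polls */
}

static int wait_for_idle(unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_status() & IDLE_BIT)
			return 0;		/* controller is idle */
		usleep(1);			/* ~1 us between polls */
	}
	return -1;				/* timed out; caller warns and carries on */
}

int main(void)
{
	printf("wait_for_idle: %d\n", wait_for_idle(1000));
	return 0;
}
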
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4f8ea4260572..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev)
56 rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; 56 rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
57 rdev->mc.gtt_location = 0xffffffffUL; 57 rdev->mc.gtt_location = 0xffffffffUL;
58 r = radeon_mc_setup(rdev); 58 r = radeon_mc_setup(rdev);
59 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
59 if (r) 60 if (r)
60 return r; 61 return r;
61 return 0; 62 return 0;
@@ -134,7 +135,8 @@ void rs600_hpd_init(struct radeon_device *rdev)
134 break; 135 break;
135 } 136 }
136 } 137 }
137 rs600_irq_set(rdev); 138 if (rdev->irq.installed)
139 rs600_irq_set(rdev);
138} 140}
139 141
140void rs600_hpd_fini(struct radeon_device *rdev) 142void rs600_hpd_fini(struct radeon_device *rdev)
@@ -315,6 +317,11 @@ int rs600_irq_set(struct radeon_device *rdev)
315 u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) & 317 u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
316 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); 318 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
317 319
320 if (!rdev->irq.installed) {
321 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
322 WREG32(R_000040_GEN_INT_CNTL, 0);
323 return -EINVAL;
324 }
318 if (rdev->irq.sw_int) { 325 if (rdev->irq.sw_int) {
319 tmp |= S_000040_SW_INT_EN(1); 326 tmp |= S_000040_SW_INT_EN(1);
320 } 327 }
@@ -396,7 +403,7 @@ int rs600_irq_process(struct radeon_device *rdev)
396 } 403 }
397 while (status || r500_disp_int) { 404 while (status || r500_disp_int) {
398 /* SW interrupt */ 405 /* SW interrupt */
399 if (G_000040_SW_INT_EN(status)) 406 if (G_000044_SW_INT(status))
400 radeon_fence_process(rdev); 407 radeon_fence_process(rdev);
401 /* Vertical blank interrupts */ 408 /* Vertical blank interrupts */
402 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) 409 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
@@ -553,6 +560,7 @@ static int rs600_startup(struct radeon_device *rdev)
553 return r; 560 return r;
554 /* Enable IRQ */ 561 /* Enable IRQ */
555 rs600_irq_set(rdev); 562 rs600_irq_set(rdev);
563 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
556 /* 1M ring buffer */ 564 /* 1M ring buffer */
557 r = r100_cp_init(rdev, 1024 * 1024); 565 r = r100_cp_init(rdev, 1024 * 1024);
558 if (r) { 566 if (r) {
@@ -602,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
602 610
603void rs600_fini(struct radeon_device *rdev) 611void rs600_fini(struct radeon_device *rdev)
604{ 612{
605 rs600_suspend(rdev);
606 r100_cp_fini(rdev); 613 r100_cp_fini(rdev);
607 r100_wb_fini(rdev); 614 r100_wb_fini(rdev);
608 r100_ib_fini(rdev); 615 r100_ib_fini(rdev);
@@ -681,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
681 if (r) { 688 if (r) {
682 /* Somethings want wront with the accel init stop accel */ 689 /* Somethings want wront with the accel init stop accel */
683 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 690 dev_err(rdev->dev, "Disabling GPU acceleration\n");
684 rs600_suspend(rdev);
685 r100_cp_fini(rdev); 691 r100_cp_fini(rdev);
686 r100_wb_fini(rdev); 692 r100_wb_fini(rdev);
687 r100_ib_fini(rdev); 693 r100_ib_fini(rdev);
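
Two related fixes appear in the rs600 hunks above: rs600_irq_set() now refuses to unmask interrupt sources when no handler has been installed (it writes 0 to the interrupt control register and returns -EINVAL), and rs600_hpd_init() only calls it once rdev->irq.installed is set. A small hedged sketch of that guard; the irq_installed flag and helper names below are illustrative, not the driver's real API.

/* Sketch: never unmask interrupt sources before a handler exists. */
#include <stdbool.h>
#include <stdio.h>

static bool irq_installed;

static int irq_set(bool want_sw_int)
{
	if (!irq_installed) {
		fprintf(stderr, "refusing to enable IRQs: no handler installed\n");
		/* the real code also clears the interrupt control register here */
		return -1;
	}
	printf("unmasking sources (sw_int=%d)\n", want_sw_int);
	return 0;
}

int main(void)
{
	irq_set(true);			/* rejected: handler not installed yet */
	irq_installed = true;		/* set after the handler is registered */
	irq_set(true);			/* now allowed */
	return 0;
}
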
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1e22f52d6039..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -172,6 +172,7 @@ static int rs690_mc_init(struct radeon_device *rdev)
172 rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16; 172 rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
173 rdev->mc.gtt_location = 0xFFFFFFFFUL; 173 rdev->mc.gtt_location = 0xFFFFFFFFUL;
174 r = radeon_mc_setup(rdev); 174 r = radeon_mc_setup(rdev);
175 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
175 if (r) 176 if (r)
176 return r; 177 return r;
177 return 0; 178 return 0;
@@ -625,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev)
625 return r; 626 return r;
626 /* Enable IRQ */ 627 /* Enable IRQ */
627 rs600_irq_set(rdev); 628 rs600_irq_set(rdev);
629 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
628 /* 1M ring buffer */ 630 /* 1M ring buffer */
629 r = r100_cp_init(rdev, 1024 * 1024); 631 r = r100_cp_init(rdev, 1024 * 1024);
630 if (r) { 632 if (r) {
@@ -674,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
674 676
675void rs690_fini(struct radeon_device *rdev) 677void rs690_fini(struct radeon_device *rdev)
676{ 678{
677 rs690_suspend(rdev);
678 r100_cp_fini(rdev); 679 r100_cp_fini(rdev);
679 r100_wb_fini(rdev); 680 r100_wb_fini(rdev);
680 r100_ib_fini(rdev); 681 r100_ib_fini(rdev);
@@ -754,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
754 if (r) { 755 if (r) {
755 /* Somethings want wront with the accel init stop accel */ 756 /* Somethings want wront with the accel init stop accel */
756 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 757 dev_err(rdev->dev, "Disabling GPU acceleration\n");
757 rs690_suspend(rdev);
758 r100_cp_fini(rdev); 758 r100_cp_fini(rdev);
759 r100_wb_fini(rdev); 759 r100_wb_fini(rdev);
760 r100_ib_fini(rdev); 760 r100_ib_fini(rdev);
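
The rs400, rs600 and rs690 hunks above all drop the *_suspend() call from the fini and error paths, so teardown only undoes what the init path actually brought up. A hedged sketch of the usual goto-unwind shape that keeps error paths symmetric with init order; init_cp/init_wb/init_ib are invented stand-ins for the real stages.

/* Sketch: each error path undoes only the steps that succeeded, in reverse order. */
#include <stdio.h>

static int init_cp(void)  { puts("cp up");  return 0; }
static int init_wb(void)  { puts("wb up");  return 0; }
static int init_ib(void)  { puts("ib up");  return -1; }	/* pretend failure */
static void fini_cp(void) { puts("cp down"); }
static void fini_wb(void) { puts("wb down"); }

static int device_init(void)
{
	int r;

	r = init_cp();
	if (r)
		return r;
	r = init_wb();
	if (r)
		goto err_cp;
	r = init_ib();
	if (r)
		goto err_wb;
	return 0;

err_wb:
	fini_wb();
err_cp:
	fini_cp();
	return r;
}

int main(void)
{
	printf("device_init: %d\n", device_init());
	return 0;
}
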
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 59632a506b46..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev)
479 } 479 }
480 /* Enable IRQ */ 480 /* Enable IRQ */
481 rs600_irq_set(rdev); 481 rs600_irq_set(rdev);
482 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
482 /* 1M ring buffer */ 483 /* 1M ring buffer */
483 r = r100_cp_init(rdev, 1024 * 1024); 484 r = r100_cp_init(rdev, 1024 * 1024);
484 if (r) { 485 if (r) {
@@ -536,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
536 537
537void rv515_fini(struct radeon_device *rdev) 538void rv515_fini(struct radeon_device *rdev)
538{ 539{
539 rv515_suspend(rdev);
540 r100_cp_fini(rdev); 540 r100_cp_fini(rdev);
541 r100_wb_fini(rdev); 541 r100_wb_fini(rdev);
542 r100_ib_fini(rdev); 542 r100_ib_fini(rdev);
@@ -614,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
614 if (r) { 614 if (r) {
615 /* Somethings want wront with the accel init stop accel */ 615 /* Somethings want wront with the accel init stop accel */
616 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 616 dev_err(rdev->dev, "Disabling GPU acceleration\n");
617 rv515_suspend(rdev);
618 r100_cp_fini(rdev); 617 r100_cp_fini(rdev);
619 r100_wb_fini(rdev); 618 r100_wb_fini(rdev);
620 r100_ib_fini(rdev); 619 r100_ib_fini(rdev);
620 radeon_irq_kms_fini(rdev);
621 rv370_pcie_gart_fini(rdev); 621 rv370_pcie_gart_fini(rdev);
622 radeon_agp_fini(rdev); 622 radeon_agp_fini(rdev);
623 radeon_irq_kms_fini(rdev);
624 rdev->accel_working = false; 623 rdev->accel_working = false;
625 } 624 }
626 return 0; 625 return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3bcb66e52786..5943d561fd1e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -779,7 +779,6 @@ int rv770_mc_init(struct radeon_device *rdev)
779 fixed20_12 a; 779 fixed20_12 a;
780 u32 tmp; 780 u32 tmp;
781 int chansize, numchan; 781 int chansize, numchan;
782 int r;
783 782
784 /* Get VRAM informations */ 783 /* Get VRAM informations */
785 rdev->mc.vram_is_ddr = true; 784 rdev->mc.vram_is_ddr = true;
@@ -822,9 +821,6 @@ int rv770_mc_init(struct radeon_device *rdev)
822 rdev->mc.real_vram_size = rdev->mc.aper_size; 821 rdev->mc.real_vram_size = rdev->mc.aper_size;
823 822
824 if (rdev->flags & RADEON_IS_AGP) { 823 if (rdev->flags & RADEON_IS_AGP) {
825 r = radeon_agp_init(rdev);
826 if (r)
827 return r;
828 /* gtt_size is setup by radeon_agp_init */ 824 /* gtt_size is setup by radeon_agp_init */
829 rdev->mc.gtt_location = rdev->mc.agp_base; 825 rdev->mc.gtt_location = rdev->mc.agp_base;
830 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 826 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -891,26 +887,25 @@ static int rv770_startup(struct radeon_device *rdev)
891 return r; 887 return r;
892 } 888 }
893 rv770_gpu_init(rdev); 889 rv770_gpu_init(rdev);
894 890 r = r600_blit_init(rdev);
895 if (!rdev->r600_blit.shader_obj) { 891 if (r) {
896 r = r600_blit_init(rdev); 892 r600_blit_fini(rdev);
893 rdev->asic->copy = NULL;
894 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
895 }
896 /* pin copy shader into vram */
897 if (rdev->r600_blit.shader_obj) {
898 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
899 if (unlikely(r != 0))
900 return r;
901 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
902 &rdev->r600_blit.shader_gpu_addr);
903 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
897 if (r) { 904 if (r) {
898 DRM_ERROR("radeon: failed blitter (%d).\n", r); 905 DRM_ERROR("failed to pin blit object %d\n", r);
899 return r; 906 return r;
900 } 907 }
901 } 908 }
902
903 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
904 if (unlikely(r != 0))
905 return r;
906 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
907 &rdev->r600_blit.shader_gpu_addr);
908 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
909 if (r) {
910 DRM_ERROR("failed to pin blit object %d\n", r);
911 return r;
912 }
913
914 /* Enable IRQ */ 909 /* Enable IRQ */
915 r = r600_irq_init(rdev); 910 r = r600_irq_init(rdev);
916 if (r) { 911 if (r) {
@@ -972,13 +967,16 @@ int rv770_suspend(struct radeon_device *rdev)
972 /* FIXME: we should wait for ring to be empty */ 967 /* FIXME: we should wait for ring to be empty */
973 r700_cp_stop(rdev); 968 r700_cp_stop(rdev);
974 rdev->cp.ready = false; 969 rdev->cp.ready = false;
970 r600_irq_suspend(rdev);
975 r600_wb_disable(rdev); 971 r600_wb_disable(rdev);
976 rv770_pcie_gart_disable(rdev); 972 rv770_pcie_gart_disable(rdev);
977 /* unpin shaders bo */ 973 /* unpin shaders bo */
978 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 974 if (rdev->r600_blit.shader_obj) {
979 if (likely(r == 0)) { 975 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
980 radeon_bo_unpin(rdev->r600_blit.shader_obj); 976 if (likely(r == 0)) {
981 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 977 radeon_bo_unpin(rdev->r600_blit.shader_obj);
978 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
979 }
982 } 980 }
983 return 0; 981 return 0;
984} 982}
@@ -1037,6 +1035,11 @@ int rv770_init(struct radeon_device *rdev)
1037 r = radeon_fence_driver_init(rdev); 1035 r = radeon_fence_driver_init(rdev);
1038 if (r) 1036 if (r)
1039 return r; 1037 return r;
1038 if (rdev->flags & RADEON_IS_AGP) {
1039 r = radeon_agp_init(rdev);
1040 if (r)
1041 radeon_agp_disable(rdev);
1042 }
1040 r = rv770_mc_init(rdev); 1043 r = rv770_mc_init(rdev);
1041 if (r) 1044 if (r)
1042 return r; 1045 return r;
@@ -1062,22 +1065,25 @@ int rv770_init(struct radeon_device *rdev)
1062 rdev->accel_working = true; 1065 rdev->accel_working = true;
1063 r = rv770_startup(rdev); 1066 r = rv770_startup(rdev);
1064 if (r) { 1067 if (r) {
1065 rv770_suspend(rdev); 1068 dev_err(rdev->dev, "disabling GPU acceleration\n");
1069 r600_cp_fini(rdev);
1066 r600_wb_fini(rdev); 1070 r600_wb_fini(rdev);
1067 radeon_ring_fini(rdev); 1071 r600_irq_fini(rdev);
1072 radeon_irq_kms_fini(rdev);
1068 rv770_pcie_gart_fini(rdev); 1073 rv770_pcie_gart_fini(rdev);
1069 rdev->accel_working = false; 1074 rdev->accel_working = false;
1070 } 1075 }
1071 if (rdev->accel_working) { 1076 if (rdev->accel_working) {
1072 r = radeon_ib_pool_init(rdev); 1077 r = radeon_ib_pool_init(rdev);
1073 if (r) { 1078 if (r) {
1074 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 1079 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1075 rdev->accel_working = false;
1076 }
1077 r = r600_ib_test(rdev);
1078 if (r) {
1079 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1080 rdev->accel_working = false; 1080 rdev->accel_working = false;
1081 } else {
1082 r = r600_ib_test(rdev);
1083 if (r) {
1084 dev_err(rdev->dev, "IB test failed (%d).\n", r);
1085 rdev->accel_working = false;
1086 }
1081 } 1087 }
1082 } 1088 }
1083 return 0; 1089 return 0;
@@ -1085,19 +1091,16 @@ int rv770_init(struct radeon_device *rdev)
1085 1091
1086void rv770_fini(struct radeon_device *rdev) 1092void rv770_fini(struct radeon_device *rdev)
1087{ 1093{
1088 rv770_suspend(rdev);
1089
1090 r600_blit_fini(rdev); 1094 r600_blit_fini(rdev);
1095 r600_cp_fini(rdev);
1096 r600_wb_fini(rdev);
1091 r600_irq_fini(rdev); 1097 r600_irq_fini(rdev);
1092 radeon_irq_kms_fini(rdev); 1098 radeon_irq_kms_fini(rdev);
1093 radeon_ring_fini(rdev);
1094 r600_wb_fini(rdev);
1095 rv770_pcie_gart_fini(rdev); 1099 rv770_pcie_gart_fini(rdev);
1096 radeon_gem_fini(rdev); 1100 radeon_gem_fini(rdev);
1097 radeon_fence_driver_fini(rdev); 1101 radeon_fence_driver_fini(rdev);
1098 radeon_clocks_fini(rdev); 1102 radeon_clocks_fini(rdev);
1099 if (rdev->flags & RADEON_IS_AGP) 1103 radeon_agp_fini(rdev);
1100 radeon_agp_fini(rdev);
1101 radeon_bo_fini(rdev); 1104 radeon_bo_fini(rdev);
1102 radeon_atombios_fini(rdev); 1105 radeon_atombios_fini(rdev);
1103 kfree(rdev->bios); 1106 kfree(rdev->bios);
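
In rv770_startup() above, r600_blit_init() is now called unconditionally; if it fails, the blitter is torn back down, the asic copy hook is cleared and a warning announces the fall back to memcpy instead of aborting startup, and the copy shader is pinned only if its object exists. A hedged sketch of that "optional accelerator with graceful fallback" shape; accel_copy_init() and the function pointer are invented for the example.

/* Sketch: try the fast path, fall back to memcpy when it is unavailable. */
#include <stdio.h>
#include <string.h>

typedef void (*copy_fn)(void *dst, const void *src, size_t n);

static void plain_copy(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
}

static int accel_copy_init(copy_fn *out)
{
	(void)out;
	return -1;	/* pretend the accelerated engine failed to initialise */
}

int main(void)
{
	copy_fn do_copy = plain_copy;
	char a[8] = "radeon", b[8];

	if (accel_copy_init(&do_copy))
		fprintf(stderr, "blitter unavailable, falling back to memcpy\n");

	do_copy(b, a, sizeof(a));
	printf("copied: %s\n", b);
	return 0;
}
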
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -426,7 +426,8 @@ moved:
426 bdev->man[bo->mem.mem_type].gpu_offset; 426 bdev->man[bo->mem.mem_type].gpu_offset;
427 bo->cur_placement = bo->mem.placement; 427 bo->cur_placement = bo->mem.placement;
428 spin_unlock(&bo->lock); 428 spin_unlock(&bo->lock);
429 } 429 } else
430 bo->offset = 0;
430 431
431 return 0; 432 return 0;
432 433
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
523static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 524static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
524{ 525{
525 struct ttm_bo_global *glob = bdev->glob; 526 struct ttm_bo_global *glob = bdev->glob;
526 struct ttm_buffer_object *entry, *nentry; 527 struct ttm_buffer_object *entry = NULL;
527 struct list_head *list, *next; 528 int ret = 0;
528 int ret;
529 529
530 spin_lock(&glob->lru_lock); 530 spin_lock(&glob->lru_lock);
531 list_for_each_safe(list, next, &bdev->ddestroy) { 531 if (list_empty(&bdev->ddestroy))
532 entry = list_entry(list, struct ttm_buffer_object, ddestroy); 532 goto out_unlock;
533 nentry = NULL;
534 533
535 /* 534 entry = list_first_entry(&bdev->ddestroy,
536 * Protect the next list entry from destruction while we 535 struct ttm_buffer_object, ddestroy);
537 * unlock the lru_lock. 536 kref_get(&entry->list_kref);
538 */
539 537
540 if (next != &bdev->ddestroy) { 538 for (;;) {
541 nentry = list_entry(next, struct ttm_buffer_object, 539 struct ttm_buffer_object *nentry = NULL;
542 ddestroy); 540
541 if (entry->ddestroy.next != &bdev->ddestroy) {
542 nentry = list_first_entry(&entry->ddestroy,
543 struct ttm_buffer_object, ddestroy);
543 kref_get(&nentry->list_kref); 544 kref_get(&nentry->list_kref);
544 } 545 }
545 kref_get(&entry->list_kref);
546 546
547 spin_unlock(&glob->lru_lock); 547 spin_unlock(&glob->lru_lock);
548 ret = ttm_bo_cleanup_refs(entry, remove_all); 548 ret = ttm_bo_cleanup_refs(entry, remove_all);
549 kref_put(&entry->list_kref, ttm_bo_release_list); 549 kref_put(&entry->list_kref, ttm_bo_release_list);
550 entry = nentry;
551
552 if (ret || !entry)
553 goto out;
550 554
551 spin_lock(&glob->lru_lock); 555 spin_lock(&glob->lru_lock);
552 if (nentry) { 556 if (list_empty(&entry->ddestroy))
553 bool next_onlist = !list_empty(next);
554 spin_unlock(&glob->lru_lock);
555 kref_put(&nentry->list_kref, ttm_bo_release_list);
556 spin_lock(&glob->lru_lock);
557 /*
558 * Someone might have raced us and removed the
559 * next entry from the list. We don't bother restarting
560 * list traversal.
561 */
562
563 if (!next_onlist)
564 break;
565 }
566 if (ret)
567 break; 557 break;
568 } 558 }
569 ret = !list_empty(&bdev->ddestroy);
570 spin_unlock(&glob->lru_lock);
571 559
560out_unlock:
561 spin_unlock(&glob->lru_lock);
562out:
563 if (entry)
564 kref_put(&entry->list_kref, ttm_bo_release_list);
572 return ret; 565 return ret;
573} 566}
574 567
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
950 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 943 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
951 ~TTM_PL_MASK_MEMTYPE); 944 ~TTM_PL_MASK_MEMTYPE);
952 945
946
947 if (mem_type == TTM_PL_SYSTEM) {
948 mem->mem_type = mem_type;
949 mem->placement = cur_flags;
950 mem->mm_node = NULL;
951 return 0;
952 }
953
953 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 954 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
954 interruptible, no_wait); 955 interruptible, no_wait);
955 if (ret == 0 && mem->mm_node) { 956 if (ret == 0 && mem->mm_node) {
@@ -1019,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1019 struct ttm_mem_reg *mem) 1020 struct ttm_mem_reg *mem)
1020{ 1021{
1021 int i; 1022 int i;
1023 struct drm_mm_node *node = mem->mm_node;
1024
1025 if (node && placement->lpfn != 0 &&
1026 (node->start < placement->fpfn ||
1027 node->start + node->size > placement->lpfn))
1028 return -1;
1022 1029
1023 for (i = 0; i < placement->num_placement; i++) { 1030 for (i = 0; i < placement->num_placement; i++) {
1024 if ((placement->placement[i] & mem->placement & 1031 if ((placement->placement[i] & mem->placement &
@@ -1844,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1844 * anyone tries to access a ttm page. 1851 * anyone tries to access a ttm page.
1845 */ 1852 */
1846 1853
1854 if (bo->bdev->driver->swap_notify)
1855 bo->bdev->driver->swap_notify(bo);
1856
1847 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); 1857 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1848out: 1858out:
1849 1859
@@ -1864,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1864 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1874 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1865 ; 1875 ;
1866} 1876}
1877EXPORT_SYMBOL(ttm_bo_swapout_all);
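
The reworked ttm_bo_delayed_delete() above walks the ddestroy list by taking a reference on the current entry, and on the next entry before dropping the LRU lock, so neither object can be freed while ttm_bo_cleanup_refs() runs without the lock held. Below is a toy model of that "pin before unlocking" idea; a plain counter and an array index stand in for kref and list_head, so it only illustrates the shape, not the real lifetime rules.

/* Toy model: hold a reference across the unlocked region. */
#include <pthread.h>
#include <stdio.h>

struct node { int refs; int data; };

static struct node nodes[3] = { {1, 10}, {1, 20}, {1, 30} };
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process_all(void)
{
	int i = 0;

	pthread_mutex_lock(&lock);
	while (i < 3) {
		struct node *cur = &nodes[i];

		cur->refs++;				/* pin before unlocking */
		pthread_mutex_unlock(&lock);

		printf("processing %d\n", cur->data);	/* may sleep or race */

		pthread_mutex_lock(&lock);
		cur->refs--;				/* unpin under the lock */
		i++;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	process_all();
	return 0;
}
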
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f6..5ca37a58a98c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
53{ 53{
54 struct ttm_tt *ttm = bo->ttm; 54 struct ttm_tt *ttm = bo->ttm;
55 struct ttm_mem_reg *old_mem = &bo->mem; 55 struct ttm_mem_reg *old_mem = &bo->mem;
56 uint32_t save_flags = old_mem->placement;
57 int ret; 56 int ret;
58 57
59 if (old_mem->mem_type != TTM_PL_SYSTEM) { 58 if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
62 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, 61 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
63 TTM_PL_MASK_MEM); 62 TTM_PL_MASK_MEM);
64 old_mem->mem_type = TTM_PL_SYSTEM; 63 old_mem->mem_type = TTM_PL_SYSTEM;
65 save_flags = old_mem->placement;
66 } 64 }
67 65
68 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); 66 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
77 75
78 *old_mem = *new_mem; 76 *old_mem = *new_mem;
79 new_mem->mm_node = NULL; 77 new_mem->mm_node = NULL;
80 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); 78
81 return 0; 79 return 0;
82} 80}
83EXPORT_SYMBOL(ttm_bo_move_ttm); 81EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
219 void *old_iomap; 217 void *old_iomap;
220 void *new_iomap; 218 void *new_iomap;
221 int ret; 219 int ret;
222 uint32_t save_flags = old_mem->placement;
223 unsigned long i; 220 unsigned long i;
224 unsigned long page; 221 unsigned long page;
225 unsigned long add = 0; 222 unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
270 267
271 *old_mem = *new_mem; 268 *old_mem = *new_mem;
272 new_mem->mm_node = NULL; 269 new_mem->mm_node = NULL;
273 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
274 270
275 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { 271 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
276 ttm_tt_unbind(ttm); 272 ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
537 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 533 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
538 struct ttm_mem_reg *old_mem = &bo->mem; 534 struct ttm_mem_reg *old_mem = &bo->mem;
539 int ret; 535 int ret;
540 uint32_t save_flags = old_mem->placement;
541 struct ttm_buffer_object *ghost_obj; 536 struct ttm_buffer_object *ghost_obj;
542 void *tmp_obj = NULL; 537 void *tmp_obj = NULL;
543 538
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
598 593
599 *old_mem = *new_mem; 594 *old_mem = *new_mem;
600 new_mem->mm_node = NULL; 595 new_mem->mm_node = NULL;
601 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); 596
602 return 0; 597 return 0;
603} 598}
604EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 599EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4ec..3d172ef04ee1 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
288 wake_up_all(&lock->queue); 288 wake_up_all(&lock->queue);
289 spin_unlock(&lock->lock); 289 spin_unlock(&lock->lock);
290} 290}
291EXPORT_SYMBOL(ttm_suspend_unlock);
291 292
292static bool __ttm_suspend_lock(struct ttm_lock *lock) 293static bool __ttm_suspend_lock(struct ttm_lock *lock)
293{ 294{
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
309{ 310{
310 wait_event(lock->queue, __ttm_suspend_lock(lock)); 311 wait_event(lock->queue, __ttm_suspend_lock(lock));
311} 312}
313EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824b..75e9d6f86ba4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
109 struct drm_hash_item hash; 109 struct drm_hash_item hash;
110 struct list_head head; 110 struct list_head head;
111 struct kref kref; 111 struct kref kref;
112 struct ttm_base_object *obj;
113 enum ttm_ref_type ref_type; 112 enum ttm_ref_type ref_type;
113 struct ttm_base_object *obj;
114 struct ttm_object_file *tfile; 114 struct ttm_object_file *tfile;
115}; 115};
116 116
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba5..e2123af7775a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -198,17 +198,26 @@ EXPORT_SYMBOL(ttm_tt_populate);
198static inline int ttm_tt_set_page_caching(struct page *p, 198static inline int ttm_tt_set_page_caching(struct page *p,
199 enum ttm_caching_state c_state) 199 enum ttm_caching_state c_state)
200{ 200{
201 int ret = 0;
202
201 if (PageHighMem(p)) 203 if (PageHighMem(p))
202 return 0; 204 return 0;
203 205
204 switch (c_state) { 206 if (get_page_memtype(p) != -1) {
205 case tt_cached: 207 /* p isn't in the default caching state, set it to
206 return set_pages_wb(p, 1); 208 * writeback first to free its current memtype. */
207 case tt_wc: 209
208 return set_memory_wc((unsigned long) page_address(p), 1); 210 ret = set_pages_wb(p, 1);
209 default: 211 if (ret)
210 return set_pages_uc(p, 1); 212 return ret;
211 } 213 }
214
215 if (c_state == tt_wc)
216 ret = set_memory_wc((unsigned long) page_address(p), 1);
217 else if (c_state == tt_uncached)
218 ret = set_pages_uc(p, 1);
219
220 return ret;
212} 221}
213#else /* CONFIG_X86 */ 222#else /* CONFIG_X86 */
214static inline int ttm_tt_set_page_caching(struct page *p, 223static inline int ttm_tt_set_page_caching(struct page *p,
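
The rewritten ttm_tt_set_page_caching() above first returns a page to write-back caching whenever it still carries a non-default memtype, and only then applies the write-combined or uncached attribute. A hedged sketch of the same two-step transition as a tiny state machine; the enum and set_state() helper are invented for illustration and do not touch real page attributes.

/* Illustrative two-step transition: pass through write-back before WC/UC. */
#include <stdio.h>

enum caching { CACHE_WB, CACHE_WC, CACHE_UC };

static enum caching cur = CACHE_WC;	/* pretend the page is already WC */

static int set_state(enum caching next)
{
	if (cur != CACHE_WB && next != CACHE_WB) {
		printf("dropping back to write-back first\n");
		cur = CACHE_WB;		/* release the old attribute */
	}
	printf("now setting state %d\n", next);
	cur = next;
	return 0;
}

int main(void)
{
	set_state(CACHE_UC);	/* goes WC -> WB -> UC */
	return 0;
}
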
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index d6f2d2b882e9..825ebe3d89d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = {
48 .busy_placement = &vram_placement_flags 48 .busy_placement = &vram_placement_flags
49}; 49};
50 50
51struct ttm_placement vmw_vram_sys_placement = {
52 .fpfn = 0,
53 .lpfn = 0,
54 .num_placement = 1,
55 .placement = &vram_placement_flags,
56 .num_busy_placement = 1,
57 .busy_placement = &sys_placement_flags
58};
59
51struct ttm_placement vmw_vram_ne_placement = { 60struct ttm_placement vmw_vram_ne_placement = {
52 .fpfn = 0, 61 .fpfn = 0,
53 .lpfn = 0, 62 .lpfn = 0,
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
172 return 0; 181 return 0;
173} 182}
174 183
184static void vmw_move_notify(struct ttm_buffer_object *bo,
185 struct ttm_mem_reg *new_mem)
186{
187 if (new_mem->mem_type != TTM_PL_SYSTEM)
188 vmw_dmabuf_gmr_unbind(bo);
189}
190
191static void vmw_swap_notify(struct ttm_buffer_object *bo)
192{
193 vmw_dmabuf_gmr_unbind(bo);
194}
195
175/** 196/**
176 * FIXME: We're using the old vmware polling method to sync. 197 * FIXME: We're using the old vmware polling method to sync.
177 * Do this with fences instead. 198 * Do this with fences instead.
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = {
225 .sync_obj_wait = vmw_sync_obj_wait, 246 .sync_obj_wait = vmw_sync_obj_wait,
226 .sync_obj_flush = vmw_sync_obj_flush, 247 .sync_obj_flush = vmw_sync_obj_flush,
227 .sync_obj_unref = vmw_sync_obj_unref, 248 .sync_obj_unref = vmw_sync_obj_unref,
228 .sync_obj_ref = vmw_sync_obj_ref 249 .sync_obj_ref = vmw_sync_obj_ref,
250 .move_notify = vmw_move_notify,
251 .swap_notify = vmw_swap_notify
229}; 252};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1db1ef30be2b..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx";
147 147
148static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 148static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
149static void vmw_master_init(struct vmw_master *); 149static void vmw_master_init(struct vmw_master *);
150static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
151 void *ptr);
150 152
151static void vmw_print_capabilities(uint32_t capabilities) 153static void vmw_print_capabilities(uint32_t capabilities)
152{ 154{
@@ -207,6 +209,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
207{ 209{
208 struct vmw_private *dev_priv; 210 struct vmw_private *dev_priv;
209 int ret; 211 int ret;
212 uint32_t svga_id;
210 213
211 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 214 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
212 if (unlikely(dev_priv == NULL)) { 215 if (unlikely(dev_priv == NULL)) {
@@ -217,6 +220,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
217 220
218 dev_priv->dev = dev; 221 dev_priv->dev = dev;
219 dev_priv->vmw_chipset = chipset; 222 dev_priv->vmw_chipset = chipset;
223 dev_priv->last_read_sequence = (uint32_t) -100;
220 mutex_init(&dev_priv->hw_mutex); 224 mutex_init(&dev_priv->hw_mutex);
221 mutex_init(&dev_priv->cmdbuf_mutex); 225 mutex_init(&dev_priv->cmdbuf_mutex);
222 rwlock_init(&dev_priv->resource_lock); 226 rwlock_init(&dev_priv->resource_lock);
@@ -236,6 +240,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
236 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 240 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
237 241
238 mutex_lock(&dev_priv->hw_mutex); 242 mutex_lock(&dev_priv->hw_mutex);
243
244 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
245 svga_id = vmw_read(dev_priv, SVGA_REG_ID);
246 if (svga_id != SVGA_ID_2) {
247 ret = -ENOSYS;
248 DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
249 mutex_unlock(&dev_priv->hw_mutex);
250 goto out_err0;
251 }
252
239 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 253 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
240 254
241 if (dev_priv->capabilities & SVGA_CAP_GMR) { 255 if (dev_priv->capabilities & SVGA_CAP_GMR) {
@@ -334,22 +348,24 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
334 */ 348 */
335 349
336 DRM_INFO("It appears like vesafb is loaded. " 350 DRM_INFO("It appears like vesafb is loaded. "
337 "Ignore above error if any. Entering stealth mode.\n"); 351 "Ignore above error if any.\n");
338 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
339 if (unlikely(ret != 0)) { 353 if (unlikely(ret != 0)) {
340 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
341 goto out_no_device; 355 goto out_no_device;
342 } 356 }
343 vmw_kms_init(dev_priv);
344 vmw_overlay_init(dev_priv);
345 } else {
346 ret = vmw_request_device(dev_priv);
347 if (unlikely(ret != 0))
348 goto out_no_device;
349 vmw_kms_init(dev_priv);
350 vmw_overlay_init(dev_priv);
351 vmw_fb_init(dev_priv);
352 } 357 }
358 ret = vmw_request_device(dev_priv);
359 if (unlikely(ret != 0))
360 goto out_no_device;
361 vmw_kms_init(dev_priv);
362 vmw_overlay_init(dev_priv);
363 vmw_fb_init(dev_priv);
364
365 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
366 register_pm_notifier(&dev_priv->pm_nb);
367
368 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
353 369
354 return 0; 370 return 0;
355 371
@@ -385,17 +401,17 @@ static int vmw_driver_unload(struct drm_device *dev)
385 401
386 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); 402 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
387 403
388 if (!dev_priv->stealth) { 404 unregister_pm_notifier(&dev_priv->pm_nb);
389 vmw_fb_close(dev_priv); 405
390 vmw_kms_close(dev_priv); 406 vmw_fb_close(dev_priv);
391 vmw_overlay_close(dev_priv); 407 vmw_kms_close(dev_priv);
392 vmw_release_device(dev_priv); 408 vmw_overlay_close(dev_priv);
393 pci_release_regions(dev->pdev); 409 vmw_release_device(dev_priv);
394 } else { 410 if (dev_priv->stealth)
395 vmw_kms_close(dev_priv);
396 vmw_overlay_close(dev_priv);
397 pci_release_region(dev->pdev, 2); 411 pci_release_region(dev->pdev, 2);
398 } 412 else
413 pci_release_regions(dev->pdev);
414
399 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 415 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
400 drm_irq_uninstall(dev_priv->dev); 416 drm_irq_uninstall(dev_priv->dev);
401 if (dev->devname == vmw_devname) 417 if (dev->devname == vmw_devname)
@@ -564,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
564 int ret = 0; 580 int ret = 0;
565 581
566 DRM_INFO("Master set.\n"); 582 DRM_INFO("Master set.\n");
567 if (dev_priv->stealth) {
568 ret = vmw_request_device(dev_priv);
569 if (unlikely(ret != 0))
570 return ret;
571 }
572 583
573 if (active) { 584 if (active) {
574 BUG_ON(active != &dev_priv->fbdev_master); 585 BUG_ON(active != &dev_priv->fbdev_master);
@@ -628,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
628 639
629 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 640 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
630 641
631 if (dev_priv->stealth) {
632 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
633 if (unlikely(ret != 0))
634 DRM_ERROR("Unable to clean VRAM on master drop.\n");
635 vmw_release_device(dev_priv);
636 }
637 dev_priv->active_master = &dev_priv->fbdev_master; 642 dev_priv->active_master = &dev_priv->fbdev_master;
638 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 643 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
639 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 644 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
640 645
641 if (!dev_priv->stealth) 646 vmw_fb_on(dev_priv);
642 vmw_fb_on(dev_priv);
643} 647}
644 648
645 649
@@ -650,6 +654,57 @@ static void vmw_remove(struct pci_dev *pdev)
650 drm_put_dev(dev); 654 drm_put_dev(dev);
651} 655}
652 656
657static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
658 void *ptr)
659{
660 struct vmw_private *dev_priv =
661 container_of(nb, struct vmw_private, pm_nb);
662 struct vmw_master *vmaster = dev_priv->active_master;
663
664 switch (val) {
665 case PM_HIBERNATION_PREPARE:
666 case PM_SUSPEND_PREPARE:
667 ttm_suspend_lock(&vmaster->lock);
668
669 /**
670 * This empties VRAM and unbinds all GMR bindings.
671 * Buffer contents is moved to swappable memory.
672 */
673 ttm_bo_swapout_all(&dev_priv->bdev);
674 break;
675 case PM_POST_HIBERNATION:
676 case PM_POST_SUSPEND:
677 ttm_suspend_unlock(&vmaster->lock);
678 break;
679 case PM_RESTORE_PREPARE:
680 break;
681 case PM_POST_RESTORE:
682 break;
683 default:
684 break;
685 }
686 return 0;
687}
688
689/**
690 * These might not be needed with the virtual SVGA device.
691 */
692
693int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
694{
695 pci_save_state(pdev);
696 pci_disable_device(pdev);
697 pci_set_power_state(pdev, PCI_D3hot);
698 return 0;
699}
700
701int vmw_pci_resume(struct pci_dev *pdev)
702{
703 pci_set_power_state(pdev, PCI_D0);
704 pci_restore_state(pdev);
705 return pci_enable_device(pdev);
706}
707
653static struct drm_driver driver = { 708static struct drm_driver driver = {
654 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 709 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
655 DRIVER_MODESET, 710 DRIVER_MODESET,
@@ -689,7 +744,9 @@ static struct drm_driver driver = {
689 .name = VMWGFX_DRIVER_NAME, 744 .name = VMWGFX_DRIVER_NAME,
690 .id_table = vmw_pci_id_list, 745 .id_table = vmw_pci_id_list,
691 .probe = vmw_probe, 746 .probe = vmw_probe,
692 .remove = vmw_remove 747 .remove = vmw_remove,
748 .suspend = vmw_pci_suspend,
749 .resume = vmw_pci_resume
693 }, 750 },
694 .name = VMWGFX_DRIVER_NAME, 751 .name = VMWGFX_DRIVER_NAME,
695 .desc = VMWGFX_DRIVER_DESC, 752 .desc = VMWGFX_DRIVER_DESC,
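
The vmwgfx changes above register a power-management notifier that, on the suspend/hibernation prepare events, takes the TTM suspend lock and swaps every buffer object out to system memory, and releases the lock again on the post events; separate vmw_pci_suspend()/vmw_pci_resume() callbacks handle the PCI power-state transitions. A hedged user-space sketch of the prepare/post pairing; the event names mirror the kernel's, but the dispatcher and stubs below are invented.

/* Sketch: quiesce on *_PREPARE, resume on POST_* (stand-in for a PM notifier). */
#include <stdio.h>

enum pm_event { EV_SUSPEND_PREPARE, EV_POST_SUSPEND };

static void suspend_lock(void)   { puts("suspend lock taken"); }
static void suspend_unlock(void) { puts("suspend lock released"); }
static void swapout_all(void)    { puts("VRAM emptied, GMR bindings dropped"); }

static int pm_notifier(enum pm_event ev)
{
	switch (ev) {
	case EV_SUSPEND_PREPARE:
		suspend_lock();
		swapout_all();		/* buffer contents now in swappable memory */
		break;
	case EV_POST_SUSPEND:
		suspend_unlock();
		break;
	}
	return 0;
}

int main(void)
{
	pm_notifier(EV_SUSPEND_PREPARE);
	pm_notifier(EV_POST_SUSPEND);
	return 0;
}
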
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e61bd85b6975..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,16 +32,17 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "vmwgfx_drm.h" 33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h" 34#include "drm_hashtab.h"
35#include "linux/suspend.h"
35#include "ttm/ttm_bo_driver.h" 36#include "ttm/ttm_bo_driver.h"
36#include "ttm/ttm_object.h" 37#include "ttm/ttm_object.h"
37#include "ttm/ttm_lock.h" 38#include "ttm/ttm_lock.h"
38#include "ttm/ttm_execbuf_util.h" 39#include "ttm/ttm_execbuf_util.h"
39#include "ttm/ttm_module.h" 40#include "ttm/ttm_module.h"
40 41
41 #define VMWGFX_DRIVER_DATE "20090724"		42 #define VMWGFX_DRIVER_DATE "20100209"
42 #define VMWGFX_DRIVER_MAJOR 0			43 #define VMWGFX_DRIVER_MAJOR 1
43 #define VMWGFX_DRIVER_MINOR 1			44 #define VMWGFX_DRIVER_MINOR 0
44 #define VMWGFX_DRIVER_PATCHLEVEL 2		45 #define VMWGFX_DRIVER_PATCHLEVEL 0
45#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
46#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
47#define VMWGFX_MAX_RELOCATIONS 2048 48#define VMWGFX_MAX_RELOCATIONS 2048
@@ -95,6 +96,8 @@ struct vmw_surface {
95 struct drm_vmw_size *sizes; 96 struct drm_vmw_size *sizes;
96 uint32_t num_sizes; 97 uint32_t num_sizes;
97 98
99 bool scanout;
100
98 /* TODO so far just a extra pointer */ 101 /* TODO so far just a extra pointer */
99 struct vmw_cursor_snooper snooper; 102 struct vmw_cursor_snooper snooper;
100}; 103};
@@ -110,6 +113,7 @@ struct vmw_fifo_state {
110 unsigned long static_buffer_size; 113 unsigned long static_buffer_size;
111 bool using_bounce_buffer; 114 bool using_bounce_buffer;
112 uint32_t capabilities; 115 uint32_t capabilities;
116 struct mutex fifo_mutex;
113 struct rw_semaphore rwsem; 117 struct rw_semaphore rwsem;
114}; 118};
115 119
@@ -210,7 +214,7 @@ struct vmw_private {
210 * Fencing and IRQs. 214 * Fencing and IRQs.
211 */ 215 */
212 216
213 uint32_t fence_seq; 217 atomic_t fence_seq;
214 wait_queue_head_t fence_queue; 218 wait_queue_head_t fence_queue;
215 wait_queue_head_t fifo_queue; 219 wait_queue_head_t fifo_queue;
216 atomic_t fence_queue_waiters; 220 atomic_t fence_queue_waiters;
@@ -258,6 +262,7 @@ struct vmw_private {
258 262
259 struct vmw_master *active_master; 263 struct vmw_master *active_master;
260 struct vmw_master fbdev_master; 264 struct vmw_master fbdev_master;
265 struct notifier_block pm_nb;
261}; 266};
262 267
263static inline struct vmw_private *vmw_priv(struct drm_device *dev) 268static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -353,6 +358,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
353 struct vmw_dma_buffer *bo); 358 struct vmw_dma_buffer *bo);
354extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, 359extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
355 struct vmw_dma_buffer *bo); 360 struct vmw_dma_buffer *bo);
361extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
356extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 362extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
357 struct drm_file *file_priv); 363 struct drm_file *file_priv);
358extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, 364extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -386,6 +392,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
386 uint32_t *sequence); 392 uint32_t *sequence);
387extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); 393extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
388extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); 394extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
395extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
389 396
390/** 397/**
391 * TTM glue - vmwgfx_ttm_glue.c 398 * TTM glue - vmwgfx_ttm_glue.c
@@ -401,6 +408,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
401 408
402extern struct ttm_placement vmw_vram_placement; 409extern struct ttm_placement vmw_vram_placement;
403extern struct ttm_placement vmw_vram_ne_placement; 410extern struct ttm_placement vmw_vram_ne_placement;
411extern struct ttm_placement vmw_vram_sys_placement;
404extern struct ttm_placement vmw_sys_placement; 412extern struct ttm_placement vmw_sys_placement;
405extern struct ttm_bo_driver vmw_bo_driver; 413extern struct ttm_bo_driver vmw_bo_driver;
406extern int vmw_dma_quiescent(struct drm_device *dev); 414extern int vmw_dma_quiescent(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e92da567403..d69caf92ffe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -490,10 +490,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) 490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
491 return 0; 491 return 0;
492 492
493 /**
494 * Put BO in VRAM, only if there is space.
495 */
496
497 ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
498 if (unlikely(ret == -ERESTARTSYS))
499 return ret;
500
501 /**
502 * Otherwise, set it up as GMR.
503 */
504
505 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
506 return 0;
507
493 ret = vmw_gmr_bind(dev_priv, bo); 508 ret = vmw_gmr_bind(dev_priv, bo);
494 if (likely(ret == 0 || ret == -ERESTARTSYS)) 509 if (likely(ret == 0 || ret == -ERESTARTSYS))
495 return ret; 510 return ret;
496 511
512 /**
513 * If that failed, try VRAM again, this time evicting
514 * previous contents.
515 */
497 516
498 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); 517 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
499 return ret; 518 return ret;
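The comments added to vmw_validate_single_buffer() describe a three-step placement policy: first try VRAM while allowing the object to remain in system memory if VRAM is full, then try a GMR binding, and only as a last resort force the object into VRAM and evict whatever is in the way; an -ERESTARTSYS from any step is returned immediately so the ioctl can be restarted after a signal. The generic shape of that fallback, stripped of the TTM and GMR specifics, is roughly the sketch below (the strategy table and names are placeholders, not driver API).

/* Kernel-style sketch of the try-in-order placement pattern; not driver code. */
#include <linux/errno.h>

typedef int (*placement_fn)(void *bo);

static int place_with_fallback(void *bo, placement_fn *strategies, int n)
{
	int i, ret = -ENOMEM;

	for (i = 0; i < n; i++) {
		ret = strategies[i](bo);
		if (ret == 0)
			return 0;	/* placed successfully */
		if (ret == -ERESTARTSYS)
			return ret;	/* interrupted by a signal: let the caller restart */
		/* otherwise fall through to the next, more aggressive strategy */
	}
	return ret;
}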
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 641dde76ada1..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
559 info->pixmap.scan_align = 1; 559 info->pixmap.scan_align = 1;
560#endif 560#endif
561 561
562 info->aperture_base = vmw_priv->vram_start;
563 info->aperture_size = vmw_priv->vram_size;
564
562 /* 565 /*
563 * Dirty & Deferred IO 566 * Dirty & Deferred IO
564 */ 567 */
@@ -649,14 +652,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
649 if (unlikely(ret != 0)) 652 if (unlikely(ret != 0))
650 goto err_unlock; 653 goto err_unlock;
651 654
652 if (vmw_bo->gmr_bound) {
653 vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
654 spin_lock(&bo->glob->lru_lock);
655 ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
656 spin_unlock(&bo->glob->lru_lock);
657 vmw_bo->gmr_bound = NULL;
658 }
659
660 ret = ttm_bo_validate(bo, &ne_placement, false, false); 655 ret = ttm_bo_validate(bo, &ne_placement, false, false);
661 ttm_bo_unreserve(bo); 656 ttm_bo_unreserve(bo);
662err_unlock: 657err_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 01feb48af333..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -29,6 +29,25 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31 31
32bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
33{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t fifo_min, hwversion;
36
37 fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
38 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
39 return false;
40
41 hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
42 if (hwversion == 0)
43 return false;
44
45 if (hwversion < SVGA3D_HWVERSION_WS65_B1)
46 return false;
47
48 return true;
49}
50
32int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 51int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
33{ 52{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 53 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -55,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
55 fifo->reserved_size = 0; 74 fifo->reserved_size = 0;
56 fifo->using_bounce_buffer = false; 75 fifo->using_bounce_buffer = false;
57 76
77 mutex_init(&fifo->fifo_mutex);
58 init_rwsem(&fifo->rwsem); 78 init_rwsem(&fifo->rwsem);
59 79
60 /* 80 /*
@@ -98,8 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
98 (unsigned int) min, 118 (unsigned int) min,
99 (unsigned int) fifo->capabilities); 119 (unsigned int) fifo->capabilities);
100 120
101 dev_priv->fence_seq = (uint32_t) -100; 121 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
102 dev_priv->last_read_sequence = (uint32_t) -100;
103 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 122 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
104 123
105 return vmw_fifo_send_fence(dev_priv, &dummy); 124 return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -265,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
265 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 284 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
266 int ret; 285 int ret;
267 286
268 down_write(&fifo_state->rwsem); 287 mutex_lock(&fifo_state->fifo_mutex);
269 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 288 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
270 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 289 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
271 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 290 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -333,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
333 } 352 }
334out_err: 353out_err:
335 fifo_state->reserved_size = 0; 354 fifo_state->reserved_size = 0;
336 up_write(&fifo_state->rwsem); 355 mutex_unlock(&fifo_state->fifo_mutex);
337 return NULL; 356 return NULL;
338} 357}
339 358
@@ -408,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
408 427
409 } 428 }
410 429
430 down_write(&fifo_state->rwsem);
411 if (fifo_state->using_bounce_buffer || reserveable) { 431 if (fifo_state->using_bounce_buffer || reserveable) {
412 next_cmd += bytes; 432 next_cmd += bytes;
413 if (next_cmd >= max) 433 if (next_cmd >= max)
@@ -419,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
419 if (reserveable) 439 if (reserveable)
420 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); 440 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
421 mb(); 441 mb();
422 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
423 up_write(&fifo_state->rwsem); 442 up_write(&fifo_state->rwsem);
443 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
444 mutex_unlock(&fifo_state->fifo_mutex);
424} 445}
425 446
426int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) 447int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -433,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
433 454
434 fm = vmw_fifo_reserve(dev_priv, bytes); 455 fm = vmw_fifo_reserve(dev_priv, bytes);
435 if (unlikely(fm == NULL)) { 456 if (unlikely(fm == NULL)) {
436 down_write(&fifo_state->rwsem); 457 *sequence = atomic_read(&dev_priv->fence_seq);
437 *sequence = dev_priv->fence_seq;
438 up_write(&fifo_state->rwsem);
439 ret = -ENOMEM; 458 ret = -ENOMEM;
440 (void)vmw_fallback_wait(dev_priv, false, true, *sequence, 459 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
441 false, 3*HZ); 460 false, 3*HZ);
@@ -443,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
443 } 462 }
444 463
445 do { 464 do {
446 *sequence = dev_priv->fence_seq++; 465 *sequence = atomic_add_return(1, &dev_priv->fence_seq);
447 } while (*sequence == 0); 466 } while (*sequence == 0);
448 467
449 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { 468 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5fa6a4ed238a..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -43,11 +43,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
43 param->value = vmw_overlay_num_free_overlays(dev_priv); 43 param->value = vmw_overlay_num_free_overlays(dev_priv);
44 break; 44 break;
45 case DRM_VMW_PARAM_3D: 45 case DRM_VMW_PARAM_3D:
46 param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0; 46 param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
47 break; 47 break;
48 case DRM_VMW_PARAM_FIFO_OFFSET: 48 case DRM_VMW_PARAM_FIFO_OFFSET:
49 param->value = dev_priv->mmio_start; 49 param->value = dev_priv->mmio_start;
50 break; 50 break;
51 case DRM_VMW_PARAM_HW_CAPS:
52 param->value = dev_priv->capabilities;
53 break;
54 case DRM_VMW_PARAM_FIFO_CAPS:
55 param->value = dev_priv->fifo.capabilities;
56 break;
51 default: 57 default:
52 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 58 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
53 param->param); 59 param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
85 return true; 85 return true;
86 86
87 /** 87 /**
88 * Below is to signal stale fences that have wrapped.
89 * First, block fence submission.
90 */
91
92 down_read(&fifo_state->rwsem);
93
94 /**
95 * Then check if the sequence is higher than what we've actually 88 * Then check if the sequence is higher than what we've actually
96 * emitted. Then the fence is stale and signaled. 89 * emitted. Then the fence is stale and signaled.
97 */ 90 */
98 91
99 ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); 92 ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
100 up_read(&fifo_state->rwsem); 93 > VMW_FENCE_WRAP);
101 94
102 return ret; 95 return ret;
103} 96}
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
127 120
128 if (fifo_idle) 121 if (fifo_idle)
129 down_read(&fifo_state->rwsem); 122 down_read(&fifo_state->rwsem);
130 signal_seq = dev_priv->fence_seq; 123 signal_seq = atomic_read(&dev_priv->fence_seq);
131 ret = 0; 124 ret = 0;
132 125
133 for (;;) { 126 for (;;) {
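Taken together, the fifo and irq hunks replace the rwsem-protected fence counter with an atomic_t: sequences are handed out with atomic_add_return() (skipping the reserved value 0), and the stale-fence test relies on unsigned 32-bit wrap-around arithmetic instead of holding the FIFO lock. A self-contained sketch of that scheme, with an assumed wrap window since VMW_FENCE_WRAP's value is defined elsewhere in the driver:

/* Illustrative sketch only; names and the wrap constant are assumptions. */
#include <linux/atomic.h>
#include <linux/types.h>

#define EXAMPLE_FENCE_WRAP	(1 << 24)	/* assumed window, not the driver's constant */

static atomic_t example_fence_seq = ATOMIC_INIT(0);

static u32 example_next_sequence(void)
{
	u32 seq;

	/* atomic_add_return() hands every caller a unique value without a lock;
	 * 0 is reserved as "no fence", so skip it when the counter wraps. */
	do {
		seq = atomic_add_return(1, &example_fence_seq);
	} while (seq == 0);

	return seq;
}

static bool example_fence_is_stale(u32 sequence)
{
	/* Unsigned subtraction keeps the comparison correct across 32-bit
	 * wrap-around: a fence lagging far behind the emitted counter is
	 * treated as already signaled. */
	return (u32)(atomic_read(&example_fence_seq) - sequence) > EXAMPLE_FENCE_WRAP;
}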
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b1af76e371c3..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
553 } *cmd; 553 } *cmd;
554 int i, increment = 1; 554 int i, increment = 1;
555 555
556 if (!num_clips || 556 if (!num_clips) {
557 !(dev_priv->fifo.capabilities &
558 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
559 num_clips = 1; 557 num_clips = 1;
560 clips = &norect; 558 clips = &norect;
561 norect.x1 = norect.y1 = 0; 559 norect.x1 = norect.y1 = 0;
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
574 572
575 for (i = 0; i < num_clips; i++, clips += increment) { 573 for (i = 0; i < num_clips; i++, clips += increment) {
576 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); 574 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
577 cmd[i].body.x = cpu_to_le32(clips[i].x1); 575 cmd[i].body.x = cpu_to_le32(clips->x1);
578 cmd[i].body.y = cpu_to_le32(clips[i].y1); 576 cmd[i].body.y = cpu_to_le32(clips->y1);
579 cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1); 577 cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
580 cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1); 578 cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
581 } 579 }
582 580
583 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); 581 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
@@ -709,6 +707,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
709 if (ret) 707 if (ret)
710 goto try_dmabuf; 708 goto try_dmabuf;
711 709
710 if (!surface->scanout)
711 goto err_not_scanout;
712
712 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, 713 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
713 mode_cmd->width, mode_cmd->height); 714 mode_cmd->width, mode_cmd->height);
714 715
@@ -742,6 +743,13 @@ try_dmabuf:
742 } 743 }
743 744
744 return &vfb->base; 745 return &vfb->base;
746
747err_not_scanout:
748 DRM_ERROR("surface not marked as scanout\n");
749 /* vmw_user_surface_lookup takes one ref */
750 vmw_surface_unreference(&surface);
751
752 return NULL;
745} 753}
746 754
747static int vmw_kms_fb_changed(struct drm_device *dev) 755static int vmw_kms_fb_changed(struct drm_device *dev)
@@ -761,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
761 769
762 drm_mode_config_init(dev); 770 drm_mode_config_init(dev);
763 dev->mode_config.funcs = &vmw_kms_funcs; 771 dev->mode_config.funcs = &vmw_kms_funcs;
764 dev->mode_config.min_width = 640; 772 dev->mode_config.min_width = 1;
765 dev->mode_config.min_height = 480; 773 dev->mode_config.min_height = 1;
766 dev->mode_config.max_width = 2048; 774 dev->mode_config.max_width = dev_priv->fb_max_width;
767 dev->mode_config.max_height = 2048; 775 dev->mode_config.max_height = dev_priv->fb_max_height;
768 776
769 ret = vmw_kms_init_legacy_display_system(dev_priv); 777 ret = vmw_kms_init_legacy_display_system(dev_priv);
770 778
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index bb6e6a096d25..5b6eabeb7f51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
104 bool pin, bool interruptible) 104 bool pin, bool interruptible)
105{ 105{
106 struct ttm_buffer_object *bo = &buf->base; 106 struct ttm_buffer_object *bo = &buf->base;
107 struct ttm_bo_global *glob = bo->glob;
108 struct ttm_placement *overlay_placement = &vmw_vram_placement; 107 struct ttm_placement *overlay_placement = &vmw_vram_placement;
109 int ret; 108 int ret;
110 109
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
116 if (unlikely(ret != 0)) 115 if (unlikely(ret != 0))
117 goto err; 116 goto err;
118 117
119 if (buf->gmr_bound) {
120 vmw_gmr_unbind(dev_priv, buf->gmr_id);
121 spin_lock(&glob->lru_lock);
122 ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
123 spin_unlock(&glob->lru_lock);
124 buf->gmr_bound = NULL;
125 }
126
127 if (pin) 118 if (pin)
128 overlay_placement = &vmw_vram_ne_placement; 119 overlay_placement = &vmw_vram_ne_placement;
129 120
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c012d5927f65..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
574 574
575 srf->flags = req->flags; 575 srf->flags = req->flags;
576 srf->format = req->format; 576 srf->format = req->format;
577 srf->scanout = req->scanout;
577 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); 578 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
578 srf->num_sizes = 0; 579 srf->num_sizes = 0;
579 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 580 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -599,6 +600,26 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
599 if (unlikely(ret != 0)) 600 if (unlikely(ret != 0))
600 goto out_err1; 601 goto out_err1;
601 602
603 if (srf->scanout &&
604 srf->num_sizes == 1 &&
605 srf->sizes[0].width == 64 &&
606 srf->sizes[0].height == 64 &&
607 srf->format == SVGA3D_A8R8G8B8) {
608
609 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
610 /* clear the image */
611 if (srf->snooper.image) {
612 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
613 } else {
614 DRM_ERROR("Failed to allocate cursor_image\n");
615 ret = -ENOMEM;
616 goto out_err1;
617 }
618 } else {
619 srf->snooper.image = NULL;
620 }
621 srf->snooper.crtc = NULL;
622
602 user_srf->base.shareable = false; 623 user_srf->base.shareable = false;
603 user_srf->base.tfile = NULL; 624 user_srf->base.tfile = NULL;
604 625
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
622 return ret; 643 return ret;
623 } 644 }
624 645
625 if (srf->flags & (1 << 9) &&
626 srf->num_sizes == 1 &&
627 srf->sizes[0].width == 64 &&
628 srf->sizes[0].height == 64 &&
629 srf->format == SVGA3D_A8R8G8B8) {
630
631 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
632 /* clear the image */
633 if (srf->snooper.image)
634 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
635 else
636 DRM_ERROR("Failed to allocate cursor_image\n");
637
638 } else {
639 srf->snooper.image = NULL;
640 }
641 srf->snooper.crtc = NULL;
642
643 rep->sid = user_srf->base.hash.key; 646 rep->sid = user_srf->base.hash.key;
644 if (rep->sid == SVGA3D_INVALID_ID) 647 if (rep->sid == SVGA3D_INVALID_ID)
645 DRM_ERROR("Created bad Surface ID.\n"); 648 DRM_ERROR("Created bad Surface ID.\n");
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
754 return bo_user_size + page_array_size; 757 return bo_user_size + page_array_size;
755} 758}
756 759
757void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 760void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
758{ 761{
759 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 762 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
760 struct ttm_bo_global *glob = bo->glob; 763 struct ttm_bo_global *glob = bo->glob;
761 struct vmw_private *dev_priv = 764 struct vmw_private *dev_priv =
762 container_of(bo->bdev, struct vmw_private, bdev); 765 container_of(bo->bdev, struct vmw_private, bdev);
763 766
764 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
765 if (vmw_bo->gmr_bound) { 767 if (vmw_bo->gmr_bound) {
766 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); 768 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
767 spin_lock(&glob->lru_lock); 769 spin_lock(&glob->lru_lock);
768 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); 770 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
769 spin_unlock(&glob->lru_lock); 771 spin_unlock(&glob->lru_lock);
772 vmw_bo->gmr_bound = false;
770 } 773 }
774}
775
776void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
777{
778 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
779 struct ttm_bo_global *glob = bo->glob;
780
781 vmw_dmabuf_gmr_unbind(bo);
782 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
771 kfree(vmw_bo); 783 kfree(vmw_bo);
772} 784}
773 785
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
813static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 825static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
814{ 826{
815 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 827 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
816 struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
817 struct ttm_bo_global *glob = bo->glob; 828 struct ttm_bo_global *glob = bo->glob;
818 struct vmw_private *dev_priv =
819 container_of(bo->bdev, struct vmw_private, bdev);
820 829
830 vmw_dmabuf_gmr_unbind(bo);
821 ttm_mem_global_free(glob->mem_glob, bo->acc_size); 831 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
822 if (vmw_bo->gmr_bound) {
823 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
824 spin_lock(&glob->lru_lock);
825 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
826 spin_unlock(&glob->lru_lock);
827 }
828 kfree(vmw_user_bo); 832 kfree(vmw_user_bo);
829} 833}
830 834
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
868 } 872 }
869 873
870 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, 874 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
871 &vmw_vram_placement, true, 875 &vmw_vram_sys_placement, true,
872 &vmw_user_dmabuf_destroy); 876 &vmw_user_dmabuf_destroy);
873 if (unlikely(ret != 0)) 877 if (unlikely(ret != 0))
874 return ret; 878 return ret;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93603c9..24b56dc54597 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
961 remaining -= 7; 961 remaining -= 7;
962 pr_devel("client 0x%p called 'target'\n", priv); 962 pr_devel("client 0x%p called 'target'\n", priv);
963 /* if target is default */ 963 /* if target is default */
964 if (!strncmp(buf, "default", 7)) 964 if (!strncmp(kbuf, "default", 7))
965 pdev = pci_dev_get(vga_default_device()); 965 pdev = pci_dev_get(vga_default_device());
966 else { 966 else {
967 if (!vga_pci_str_to_vars(curr_pos, remaining, 967 if (!vga_pci_str_to_vars(curr_pos, remaining,
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 4b96e7a898cf..5b4d66dc1a05 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -431,6 +431,13 @@ static const struct hid_device_id apple_devices[] = {
431 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 431 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
432 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), 432 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
433 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 433 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
434 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
435 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
436 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
437 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
438 APPLE_ISO_KEYBOARD },
439 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS),
440 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
434 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY), 441 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY),
435 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 442 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
436 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY), 443 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 80792d38d25c..eabe5f87c6c1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,6 +1285,9 @@ static const struct hid_device_id hid_blacklist[] = {
1285 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, 1285 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, 1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, 1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
1288 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1289 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1290 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1291 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1292 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1290 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
@@ -1553,6 +1556,7 @@ static const struct hid_device_id hid_ignore_list[] = {
1553 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, 1556 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
1554 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, 1557 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
1555 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 1558 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
1559 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
1556 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, 1560 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
1557 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, 1561 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
1558 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) }, 1562 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3839340e293a..010368e649ed 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -88,6 +88,9 @@
88#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 88#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
89#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 89#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
90#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 90#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
91#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
92#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
93#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
91#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 94#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
92#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 95#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
93#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 96#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
@@ -166,6 +169,9 @@
166#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f 169#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
167#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 170#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
168 171
172#define USB_VENDOR_ID_ETT 0x0664
173#define USB_DEVICE_ID_TC5UH 0x0309
174
169#define USB_VENDOR_ID_EZKEY 0x0518 175#define USB_VENDOR_ID_EZKEY 0x0518
170#define USB_DEVICE_ID_BTC_8193 0x0002 176#define USB_DEVICE_ID_BTC_8193 0x0002
171 177
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 5b222eed0692..510dd1340597 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -39,7 +39,17 @@
39 * 39 *
40 * 3. 135 byte report descriptor 40 * 3. 135 byte report descriptor
41 * Report #4 has an array field with logical range 0..17 instead of 1..14. 41 * Report #4 has an array field with logical range 0..17 instead of 1..14.
42 *
43 * 4. 171 byte report descriptor
44 * Report #3 has an array field with logical range 0..1 instead of 1..3.
42 */ 45 */
46static inline void samsung_dev_trace(struct hid_device *hdev,
47 unsigned int rsize)
48{
49 dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
50 "descriptor\n", rsize);
51}
52
43static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc, 53static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
44 unsigned int rsize) 54 unsigned int rsize)
45{ 55{
@@ -47,8 +57,7 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
47 rdesc[177] == 0x75 && rdesc[178] == 0x30 && 57 rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
48 rdesc[179] == 0x95 && rdesc[180] == 0x01 && 58 rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
49 rdesc[182] == 0x40) { 59 rdesc[182] == 0x40) {
50 dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report " 60 samsung_dev_trace(hdev, 184);
51 "descriptor\n", 184);
52 rdesc[176] = 0xff; 61 rdesc[176] = 0xff;
53 rdesc[178] = 0x08; 62 rdesc[178] = 0x08;
54 rdesc[180] = 0x06; 63 rdesc[180] = 0x06;
@@ -56,17 +65,21 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
56 } else 65 } else
57 if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 && 66 if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
58 rdesc[194] == 0x25 && rdesc[195] == 0x12) { 67 rdesc[194] == 0x25 && rdesc[195] == 0x12) {
59 dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report " 68 samsung_dev_trace(hdev, 203);
60 "descriptor\n", 203);
61 rdesc[193] = 0x1; 69 rdesc[193] = 0x1;
62 rdesc[195] = 0xf; 70 rdesc[195] = 0xf;
63 } else 71 } else
64 if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 && 72 if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
65 rdesc[126] == 0x25 && rdesc[127] == 0x11) { 73 rdesc[126] == 0x25 && rdesc[127] == 0x11) {
66 dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report " 74 samsung_dev_trace(hdev, 135);
67 "descriptor\n", 135);
68 rdesc[125] = 0x1; 75 rdesc[125] = 0x1;
69 rdesc[127] = 0xe; 76 rdesc[127] = 0xe;
77 } else
78 if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
79 rdesc[162] == 0x25 && rdesc[163] == 0x01) {
80 samsung_dev_trace(hdev, 171);
81 rdesc[161] = 0x1;
82 rdesc[163] = 0x3;
70 } 83 }
71} 84}
72 85
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 747542172242..12dcda529201 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -142,6 +142,7 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
142 wdata->butstate = rw; 142 wdata->butstate = rw;
143 input_report_key(input, BTN_0, rw & 0x02); 143 input_report_key(input, BTN_0, rw & 0x02);
144 input_report_key(input, BTN_1, rw & 0x01); 144 input_report_key(input, BTN_1, rw & 0x01);
145 input_report_key(input, BTN_TOOL_FINGER, 0xf0);
145 input_event(input, EV_MSC, MSC_SERIAL, 0xf0); 146 input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
146 input_sync(input); 147 input_sync(input);
147 } 148 }
@@ -196,6 +197,9 @@ static int wacom_probe(struct hid_device *hdev,
196 /* Pad */ 197 /* Pad */
197 input->evbit[0] |= BIT(EV_MSC); 198 input->evbit[0] |= BIT(EV_MSC);
198 input->mscbit[0] |= BIT(MSC_SERIAL); 199 input->mscbit[0] |= BIT(MSC_SERIAL);
200 set_bit(BTN_0, input->keybit);
201 set_bit(BTN_1, input->keybit);
202 set_bit(BTN_TOOL_FINGER, input->keybit);
199 203
200 /* Distance, rubber and mouse */ 204 /* Distance, rubber and mouse */
201 input->absbit[0] |= BIT(ABS_DISTANCE); 205 input->absbit[0] |= BIT(ABS_DISTANCE);
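The wacom hunks report BTN_TOOL_FINGER for pad events and, at probe time, declare BTN_0, BTN_1 and BTN_TOOL_FINGER in the device's keybit map; the input core silently drops key events whose codes were never declared as capabilities, so both halves are needed. A minimal illustration of that pairing (functions prefixed example_ are placeholders, not the driver's code):

/* Illustrative sketch, not the driver's code. */
#include <linux/input.h>

static void example_declare_pad_keys(struct input_dev *input)
{
	__set_bit(EV_KEY, input->evbit);
	__set_bit(BTN_0, input->keybit);
	__set_bit(BTN_1, input->keybit);
	__set_bit(BTN_TOOL_FINGER, input->keybit);
}

static void example_report_pad(struct input_dev *input, u8 buttons)
{
	input_report_key(input, BTN_0, buttons & 0x02);
	input_report_key(input, BTN_1, buttons & 0x01);
	input_report_key(input, BTN_TOOL_FINGER, 1);
	input_sync(input);	/* flush the batched events to handlers */
}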
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 46c3c566307e..68cf87749a42 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -392,7 +392,7 @@ config SENSORS_GL520SM
392 392
393config SENSORS_CORETEMP 393config SENSORS_CORETEMP
394 tristate "Intel Core/Core2/Atom temperature sensor" 394 tristate "Intel Core/Core2/Atom temperature sensor"
395 depends on X86 && EXPERIMENTAL 395 depends on X86 && PCI && EXPERIMENTAL
396 help 396 help
397 If you say yes here you get support for the temperature 397 If you say yes here you get support for the temperature
398 sensor inside your CPU. Most of the family 6 CPUs 398 sensor inside your CPU. Most of the family 6 CPUs
@@ -792,6 +792,16 @@ config SENSORS_ADS7828
792 This driver can also be built as a module. If so, the module 792 This driver can also be built as a module. If so, the module
793 will be called ads7828. 793 will be called ads7828.
794 794
795config SENSORS_AMC6821
796 tristate "Texas Instruments AMC6821"
797 depends on I2C && EXPERIMENTAL
798 help
799 If you say yes here you get support for the Texas Instruments
800 AMC6821 hardware monitoring chips.
801
802 This driver can also be build as a module. If so, the module
803 will be called amc6821.
804
795config SENSORS_THMC50 805config SENSORS_THMC50
796 tristate "Texas Instruments THMC50 / Analog Devices ADM1022" 806 tristate "Texas Instruments THMC50 / Analog Devices ADM1022"
797 depends on I2C && EXPERIMENTAL 807 depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 450c8e894277..4bc215c0953f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
86obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o 86obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
87obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o 87obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
88obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o 88obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
89obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
89obj-$(CONFIG_SENSORS_THMC50) += thmc50.o 90obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
90obj-$(CONFIG_SENSORS_TMP401) += tmp401.o 91obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
91obj-$(CONFIG_SENSORS_TMP421) += tmp421.o 92obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index a1a7ef14b519..b8156b4893bb 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -94,7 +94,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
94#define ADT7462_PIN24_SHIFT 6 94#define ADT7462_PIN24_SHIFT 6
95#define ADT7462_PIN26_VOLT_INPUT 0x08 95#define ADT7462_PIN26_VOLT_INPUT 0x08
96#define ADT7462_PIN25_VOLT_INPUT 0x20 96#define ADT7462_PIN25_VOLT_INPUT 0x20
97#define ADT7462_PIN28_SHIFT 6 /* cfg3 */ 97#define ADT7462_PIN28_SHIFT 4 /* cfg3 */
98#define ADT7462_PIN28_VOLT 0x5 98#define ADT7462_PIN28_VOLT 0x5
99 99
100#define ADT7462_REG_ALARM1 0xB8 100#define ADT7462_REG_ALARM1 0xB8
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
179 * 179 *
180 * Some, but not all, of these voltages have low/high limits. 180 * Some, but not all, of these voltages have low/high limits.
181 */ 181 */
182#define ADT7462_VOLT_COUNT 12 182#define ADT7462_VOLT_COUNT 13
183 183
184#define ADT7462_VENDOR 0x41 184#define ADT7462_VENDOR 0x41
185#define ADT7462_DEVICE 0x62 185#define ADT7462_DEVICE 0x62
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
new file mode 100644
index 000000000000..fa9708c2d723
--- /dev/null
+++ b/drivers/hwmon/amc6821.c
@@ -0,0 +1,1115 @@
1/*
2 amc6821.c - Part of lm_sensors, Linux kernel modules for hardware
3 monitoring
4 Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si>
5
6 Based on max6650.c:
7 Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*/
23
24
25#include <linux/kernel.h> /* Needed for KERN_INFO */
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/slab.h>
29#include <linux/jiffies.h>
30#include <linux/i2c.h>
31#include <linux/hwmon.h>
32#include <linux/hwmon-sysfs.h>
33#include <linux/err.h>
34#include <linux/mutex.h>
35
36
37/*
38 * Addresses to scan.
39 */
40
41static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e,
42 0x4c, 0x4d, 0x4e, I2C_CLIENT_END};
43
44
45
46/*
47 * Insmod parameters
48 */
49
50static int pwminv = 0; /*Inverted PWM output. */
51module_param(pwminv, int, S_IRUGO);
52
53static int init = 1; /*Power-on initialization.*/
54module_param(init, int, S_IRUGO);
55
56
57enum chips { amc6821 };
58
59#define AMC6821_REG_DEV_ID 0x3D
60#define AMC6821_REG_COMP_ID 0x3E
61#define AMC6821_REG_CONF1 0x00
62#define AMC6821_REG_CONF2 0x01
63#define AMC6821_REG_CONF3 0x3F
64#define AMC6821_REG_CONF4 0x04
65#define AMC6821_REG_STAT1 0x02
66#define AMC6821_REG_STAT2 0x03
67#define AMC6821_REG_TDATA_LOW 0x08
68#define AMC6821_REG_TDATA_HI 0x09
69#define AMC6821_REG_LTEMP_HI 0x0A
70#define AMC6821_REG_RTEMP_HI 0x0B
71#define AMC6821_REG_LTEMP_LIMIT_MIN 0x15
72#define AMC6821_REG_LTEMP_LIMIT_MAX 0x14
73#define AMC6821_REG_RTEMP_LIMIT_MIN 0x19
74#define AMC6821_REG_RTEMP_LIMIT_MAX 0x18
75#define AMC6821_REG_LTEMP_CRIT 0x1B
76#define AMC6821_REG_RTEMP_CRIT 0x1D
77#define AMC6821_REG_PSV_TEMP 0x1C
78#define AMC6821_REG_DCY 0x22
79#define AMC6821_REG_LTEMP_FAN_CTRL 0x24
80#define AMC6821_REG_RTEMP_FAN_CTRL 0x25
81#define AMC6821_REG_DCY_LOW_TEMP 0x21
82
83#define AMC6821_REG_TACH_LLIMITL 0x10
84#define AMC6821_REG_TACH_LLIMITH 0x11
85#define AMC6821_REG_TACH_HLIMITL 0x12
86#define AMC6821_REG_TACH_HLIMITH 0x13
87
88#define AMC6821_CONF1_START 0x01
89#define AMC6821_CONF1_FAN_INT_EN 0x02
90#define AMC6821_CONF1_FANIE 0x04
91#define AMC6821_CONF1_PWMINV 0x08
92#define AMC6821_CONF1_FAN_FAULT_EN 0x10
93#define AMC6821_CONF1_FDRC0 0x20
94#define AMC6821_CONF1_FDRC1 0x40
95#define AMC6821_CONF1_THERMOVIE 0x80
96
97#define AMC6821_CONF2_PWM_EN 0x01
98#define AMC6821_CONF2_TACH_MODE 0x02
99#define AMC6821_CONF2_TACH_EN 0x04
100#define AMC6821_CONF2_RTFIE 0x08
101#define AMC6821_CONF2_LTOIE 0x10
102#define AMC6821_CONF2_RTOIE 0x20
103#define AMC6821_CONF2_PSVIE 0x40
104#define AMC6821_CONF2_RST 0x80
105
106#define AMC6821_CONF3_THERM_FAN_EN 0x80
107#define AMC6821_CONF3_REV_MASK 0x0F
108
109#define AMC6821_CONF4_OVREN 0x10
110#define AMC6821_CONF4_TACH_FAST 0x20
111#define AMC6821_CONF4_PSPR 0x40
112#define AMC6821_CONF4_MODE 0x80
113
114#define AMC6821_STAT1_RPM_ALARM 0x01
115#define AMC6821_STAT1_FANS 0x02
116#define AMC6821_STAT1_RTH 0x04
117#define AMC6821_STAT1_RTL 0x08
118#define AMC6821_STAT1_R_THERM 0x10
119#define AMC6821_STAT1_RTF 0x20
120#define AMC6821_STAT1_LTH 0x40
121#define AMC6821_STAT1_LTL 0x80
122
123#define AMC6821_STAT2_RTC 0x08
124#define AMC6821_STAT2_LTC 0x10
125#define AMC6821_STAT2_LPSV 0x20
126#define AMC6821_STAT2_L_THERM 0x40
127#define AMC6821_STAT2_THERM_IN 0x80
128
129enum {IDX_TEMP1_INPUT = 0, IDX_TEMP1_MIN, IDX_TEMP1_MAX,
130 IDX_TEMP1_CRIT, IDX_TEMP2_INPUT, IDX_TEMP2_MIN,
131 IDX_TEMP2_MAX, IDX_TEMP2_CRIT,
132 TEMP_IDX_LEN, };
133
134static const u8 temp_reg[] = {AMC6821_REG_LTEMP_HI,
135 AMC6821_REG_LTEMP_LIMIT_MIN,
136 AMC6821_REG_LTEMP_LIMIT_MAX,
137 AMC6821_REG_LTEMP_CRIT,
138 AMC6821_REG_RTEMP_HI,
139 AMC6821_REG_RTEMP_LIMIT_MIN,
140 AMC6821_REG_RTEMP_LIMIT_MAX,
141 AMC6821_REG_RTEMP_CRIT, };
142
143enum {IDX_FAN1_INPUT = 0, IDX_FAN1_MIN, IDX_FAN1_MAX,
144 FAN1_IDX_LEN, };
145
146static const u8 fan_reg_low[] = {AMC6821_REG_TDATA_LOW,
147 AMC6821_REG_TACH_LLIMITL,
148 AMC6821_REG_TACH_HLIMITL, };
149
150
151static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI,
152 AMC6821_REG_TACH_LLIMITH,
153 AMC6821_REG_TACH_HLIMITH, };
154
155static int amc6821_probe(
156 struct i2c_client *client,
157 const struct i2c_device_id *id);
158static int amc6821_detect(
159 struct i2c_client *client,
160 struct i2c_board_info *info);
161static int amc6821_init_client(struct i2c_client *client);
162static int amc6821_remove(struct i2c_client *client);
163static struct amc6821_data *amc6821_update_device(struct device *dev);
164
165/*
166 * Driver data (common to all clients)
167 */
168
169static const struct i2c_device_id amc6821_id[] = {
170 { "amc6821", amc6821 },
171 { }
172};
173
174MODULE_DEVICE_TABLE(i2c, amc6821_id);
175
176static struct i2c_driver amc6821_driver = {
177 .class = I2C_CLASS_HWMON,
178 .driver = {
179 .name = "amc6821",
180 },
181 .probe = amc6821_probe,
182 .remove = amc6821_remove,
183 .id_table = amc6821_id,
184 .detect = amc6821_detect,
185 .address_list = normal_i2c,
186};
187
188
189/*
190 * Client data (each client gets its own)
191 */
192
193struct amc6821_data {
194 struct device *hwmon_dev;
195 struct mutex update_lock;
196 char valid; /* zero until following fields are valid */
197 unsigned long last_updated; /* in jiffies */
198
199 /* register values */
200 int temp[TEMP_IDX_LEN];
201
202 u16 fan[FAN1_IDX_LEN];
203 u8 fan1_div;
204
205 u8 pwm1;
206 u8 temp1_auto_point_temp[3];
207 u8 temp2_auto_point_temp[3];
208 u8 pwm1_auto_point_pwm[3];
209 u8 pwm1_enable;
210 u8 pwm1_auto_channels_temp;
211
212 u8 stat1;
213 u8 stat2;
214};
215
216
217static ssize_t get_temp(
218 struct device *dev,
219 struct device_attribute *devattr,
220 char *buf)
221{
222 struct amc6821_data *data = amc6821_update_device(dev);
223 int ix = to_sensor_dev_attr(devattr)->index;
224
225 return sprintf(buf, "%d\n", data->temp[ix] * 1000);
226}
227
228
229
230static ssize_t set_temp(
231 struct device *dev,
232 struct device_attribute *attr,
233 const char *buf,
234 size_t count)
235{
236 struct i2c_client *client = to_i2c_client(dev);
237 struct amc6821_data *data = i2c_get_clientdata(client);
238 int ix = to_sensor_dev_attr(attr)->index;
239 long val;
240
241 int ret = strict_strtol(buf, 10, &val);
242 if (ret)
243 return ret;
244 val = SENSORS_LIMIT(val / 1000, -128, 127);
245
246 mutex_lock(&data->update_lock);
247 data->temp[ix] = val;
248 if (i2c_smbus_write_byte_data(client, temp_reg[ix], data->temp[ix])) {
249 dev_err(&client->dev, "Register write error, aborting.\n");
250 count = -EIO;
251 }
252 mutex_unlock(&data->update_lock);
253 return count;
254}
255
256
257
258
259static ssize_t get_temp_alarm(
260 struct device *dev,
261 struct device_attribute *devattr,
262 char *buf)
263{
264 struct amc6821_data *data = amc6821_update_device(dev);
265 int ix = to_sensor_dev_attr(devattr)->index;
266 u8 flag;
267
268 switch (ix) {
269 case IDX_TEMP1_MIN:
270 flag = data->stat1 & AMC6821_STAT1_LTL;
271 break;
272 case IDX_TEMP1_MAX:
273 flag = data->stat1 & AMC6821_STAT1_LTH;
274 break;
275 case IDX_TEMP1_CRIT:
276 flag = data->stat2 & AMC6821_STAT2_LTC;
277 break;
278 case IDX_TEMP2_MIN:
279 flag = data->stat1 & AMC6821_STAT1_RTL;
280 break;
281 case IDX_TEMP2_MAX:
282 flag = data->stat1 & AMC6821_STAT1_RTH;
283 break;
284 case IDX_TEMP2_CRIT:
285 flag = data->stat2 & AMC6821_STAT2_RTC;
286 break;
287 default:
288 dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
289 return -EINVAL;
290 }
291 if (flag)
292 return sprintf(buf, "1");
293 else
294 return sprintf(buf, "0");
295}
296
297
298
299
300static ssize_t get_temp2_fault(
301 struct device *dev,
302 struct device_attribute *devattr,
303 char *buf)
304{
305 struct amc6821_data *data = amc6821_update_device(dev);
306 if (data->stat1 & AMC6821_STAT1_RTF)
307 return sprintf(buf, "1");
308 else
309 return sprintf(buf, "0");
310}
311
312static ssize_t get_pwm1(
313 struct device *dev,
314 struct device_attribute *devattr,
315 char *buf)
316{
317 struct amc6821_data *data = amc6821_update_device(dev);
318 return sprintf(buf, "%d\n", data->pwm1);
319}
320
321static ssize_t set_pwm1(
322 struct device *dev,
323 struct device_attribute *devattr,
324 const char *buf,
325 size_t count)
326{
327 struct i2c_client *client = to_i2c_client(dev);
328 struct amc6821_data *data = i2c_get_clientdata(client);
329 long val;
330 int ret = strict_strtol(buf, 10, &val);
331 if (ret)
332 return ret;
333
334 mutex_lock(&data->update_lock);
335 data->pwm1 = SENSORS_LIMIT(val , 0, 255);
336 i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
337 mutex_unlock(&data->update_lock);
338 return count;
339}
340
341static ssize_t get_pwm1_enable(
342 struct device *dev,
343 struct device_attribute *devattr,
344 char *buf)
345{
346 struct amc6821_data *data = amc6821_update_device(dev);
347 return sprintf(buf, "%d\n", data->pwm1_enable);
348}
349
350static ssize_t set_pwm1_enable(
351 struct device *dev,
352 struct device_attribute *attr,
353 const char *buf,
354 size_t count)
355{
356 struct i2c_client *client = to_i2c_client(dev);
357 struct amc6821_data *data = i2c_get_clientdata(client);
358 long val;
359 int config = strict_strtol(buf, 10, &val);
360 if (config)
361 return config;
362
363 config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
364 if (config < 0) {
365 dev_err(&client->dev,
366 "Error reading configuration register, aborting.\n");
367 return -EIO;
368 }
369
370 switch (val) {
371 case 1:
372 config &= ~AMC6821_CONF1_FDRC0;
373 config &= ~AMC6821_CONF1_FDRC1;
374 break;
375 case 2:
376 config &= ~AMC6821_CONF1_FDRC0;
377 config |= AMC6821_CONF1_FDRC1;
378 break;
379 case 3:
380 config |= AMC6821_CONF1_FDRC0;
381 config |= AMC6821_CONF1_FDRC1;
382 break;
383 default:
384 return -EINVAL;
385 }
386 mutex_lock(&data->update_lock);
387 if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) {
388 dev_err(&client->dev,
389 "Configuration register write error, aborting.\n");
390 count = -EIO;
391 }
392 mutex_unlock(&data->update_lock);
393 return count;
394}
395
396
397static ssize_t get_pwm1_auto_channels_temp(
398 struct device *dev,
399 struct device_attribute *devattr,
400 char *buf)
401{
402 struct amc6821_data *data = amc6821_update_device(dev);
403 return sprintf(buf, "%d\n", data->pwm1_auto_channels_temp);
404}
405
406
407static ssize_t get_temp_auto_point_temp(
408 struct device *dev,
409 struct device_attribute *devattr,
410 char *buf)
411{
412 int ix = to_sensor_dev_attr_2(devattr)->index;
413 int nr = to_sensor_dev_attr_2(devattr)->nr;
414 struct amc6821_data *data = amc6821_update_device(dev);
415 switch (nr) {
416 case 1:
417 return sprintf(buf, "%d\n",
418 data->temp1_auto_point_temp[ix] * 1000);
419 break;
420 case 2:
421 return sprintf(buf, "%d\n",
422 data->temp2_auto_point_temp[ix] * 1000);
423 break;
424 default:
425 dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
426 return -EINVAL;
427 }
428}
429
430
431static ssize_t get_pwm1_auto_point_pwm(
432 struct device *dev,
433 struct device_attribute *devattr,
434 char *buf)
435{
436 int ix = to_sensor_dev_attr(devattr)->index;
437 struct amc6821_data *data = amc6821_update_device(dev);
438 return sprintf(buf, "%d\n", data->pwm1_auto_point_pwm[ix]);
439}
440
441
442static inline ssize_t set_slope_register(struct i2c_client *client,
443 u8 reg,
444 u8 dpwm,
445 u8 *ptemp)
446{
447 int dt;
448 u8 tmp;
449
450 dt = ptemp[2]-ptemp[1];
451 for (tmp = 4; tmp > 0; tmp--) {
452 if (dt * (0x20 >> tmp) >= dpwm)
453 break;
454 }
455 tmp |= (ptemp[1] & 0x7C) << 1;
456 if (i2c_smbus_write_byte_data(client,
457 reg, tmp)) {
458 dev_err(&client->dev, "Register write error, aborting.\n");
459 return -EIO;
460 }
461 return 0;
462}
463
464
465
466static ssize_t set_temp_auto_point_temp(
467 struct device *dev,
468 struct device_attribute *attr,
469 const char *buf,
470 size_t count)
471{
472 struct i2c_client *client = to_i2c_client(dev);
473 struct amc6821_data *data = amc6821_update_device(dev);
474 int ix = to_sensor_dev_attr_2(attr)->index;
475 int nr = to_sensor_dev_attr_2(attr)->nr;
476 u8 *ptemp;
477 u8 reg;
478 int dpwm;
479 long val;
480 int ret = strict_strtol(buf, 10, &val);
481 if (ret)
482 return ret;
483
484 switch (nr) {
485 case 1:
486 ptemp = data->temp1_auto_point_temp;
487 reg = AMC6821_REG_LTEMP_FAN_CTRL;
488 break;
489 case 2:
490 ptemp = data->temp2_auto_point_temp;
491 reg = AMC6821_REG_RTEMP_FAN_CTRL;
492 break;
493 default:
494 dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
495 return -EINVAL;
496 }
497
498 data->valid = 0;
499 mutex_lock(&data->update_lock);
500 switch (ix) {
501 case 0:
502 ptemp[0] = SENSORS_LIMIT(val / 1000, 0,
503 data->temp1_auto_point_temp[1]);
504 ptemp[0] = SENSORS_LIMIT(ptemp[0], 0,
505 data->temp2_auto_point_temp[1]);
506 ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63);
507 if (i2c_smbus_write_byte_data(
508 client,
509 AMC6821_REG_PSV_TEMP,
510 ptemp[0])) {
511 dev_err(&client->dev,
512 "Register write error, aborting.\n");
513 count = -EIO;
514 }
515 goto EXIT;
516 break;
517 case 1:
518 ptemp[1] = SENSORS_LIMIT(
519 val / 1000,
520 (ptemp[0] & 0x7C) + 4,
521 124);
522 ptemp[1] &= 0x7C;
523 ptemp[2] = SENSORS_LIMIT(
524 ptemp[2], ptemp[1] + 1,
525 255);
526 break;
527 case 2:
528 ptemp[2] = SENSORS_LIMIT(
529 val / 1000,
530 ptemp[1]+1,
531 255);
532 break;
533 default:
534 dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
535 count = -EINVAL;
536 goto EXIT;
537 }
538 dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
539 if (set_slope_register(client, reg, dpwm, ptemp))
540 count = -EIO;
541
542EXIT:
543 mutex_unlock(&data->update_lock);
544 return count;
545}
546
547
548
549static ssize_t set_pwm1_auto_point_pwm(
550 struct device *dev,
551 struct device_attribute *attr,
552 const char *buf,
553 size_t count)
554{
555 struct i2c_client *client = to_i2c_client(dev);
556 struct amc6821_data *data = i2c_get_clientdata(client);
557 int dpwm;
558 long val;
559 int ret = strict_strtol(buf, 10, &val);
560 if (ret)
561 return ret;
562
563 mutex_lock(&data->update_lock);
564 data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254);
565 if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
566 data->pwm1_auto_point_pwm[1])) {
567 dev_err(&client->dev, "Register write error, aborting.\n");
568 count = -EIO;
569 goto EXIT;
570 }
571 dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
572 if (set_slope_register(client, AMC6821_REG_LTEMP_FAN_CTRL, dpwm,
573 data->temp1_auto_point_temp)) {
574 count = -EIO;
575 goto EXIT;
576 }
577 if (set_slope_register(client, AMC6821_REG_RTEMP_FAN_CTRL, dpwm,
578 data->temp2_auto_point_temp)) {
579 count = -EIO;
580 goto EXIT;
581 }
582
583EXIT:
584 data->valid = 0;
585 mutex_unlock(&data->update_lock);
586 return count;
587}
588
589static ssize_t get_fan(
590 struct device *dev,
591 struct device_attribute *devattr,
592 char *buf)
593{
594 struct amc6821_data *data = amc6821_update_device(dev);
595 int ix = to_sensor_dev_attr(devattr)->index;
596 if (0 == data->fan[ix])
597 return sprintf(buf, "0");
598 return sprintf(buf, "%d\n", (int)(6000000 / data->fan[ix]));
599}
600
601
602
603static ssize_t get_fan1_fault(
604 struct device *dev,
605 struct device_attribute *devattr,
606 char *buf)
607{
608 struct amc6821_data *data = amc6821_update_device(dev);
609 if (data->stat1 & AMC6821_STAT1_FANS)
610 return sprintf(buf, "1");
611 else
612 return sprintf(buf, "0");
613}
614
615
616
617static ssize_t set_fan(
618 struct device *dev,
619 struct device_attribute *attr,
620 const char *buf, size_t count)
621{
622 struct i2c_client *client = to_i2c_client(dev);
623 struct amc6821_data *data = i2c_get_clientdata(client);
624 long val;
625 int ix = to_sensor_dev_attr(attr)->index;
626 int ret = strict_strtol(buf, 10, &val);
627 if (ret)
628 return ret;
629 val = 1 > val ? 0xFFFF : 6000000/val;
630
631 mutex_lock(&data->update_lock);
632 data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF);
633 if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
634 data->fan[ix] & 0xFF)) {
635 dev_err(&client->dev, "Register write error, aborting.\n");
636 count = -EIO;
637 goto EXIT;
638 }
639 if (i2c_smbus_write_byte_data(client,
640 fan_reg_hi[ix], data->fan[ix] >> 8)) {
641 dev_err(&client->dev, "Register write error, aborting.\n");
642 count = -EIO;
643 }
644EXIT:
645 mutex_unlock(&data->update_lock);
646 return count;
647}
648
649
650
651static ssize_t get_fan1_div(
652 struct device *dev,
653 struct device_attribute *devattr,
654 char *buf)
655{
656 struct amc6821_data *data = amc6821_update_device(dev);
657 return sprintf(buf, "%d\n", data->fan1_div);
658}
659
660static ssize_t set_fan1_div(
661 struct device *dev,
662 struct device_attribute *attr,
663 const char *buf, size_t count)
664{
665 struct i2c_client *client = to_i2c_client(dev);
666 struct amc6821_data *data = i2c_get_clientdata(client);
667 long val;
668 int config = strict_strtol(buf, 10, &val);
669 if (config)
670 return config;
671
672 config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
673 if (config < 0) {
674 dev_err(&client->dev,
675 "Error reading configuration register, aborting.\n");
676 return -EIO;
677 }
678 mutex_lock(&data->update_lock);
679 switch (val) {
680 case 2:
681 config &= ~AMC6821_CONF4_PSPR;
682 data->fan1_div = 2;
683 break;
684 case 4:
685 config |= AMC6821_CONF4_PSPR;
686 data->fan1_div = 4;
687 break;
688 default:
689 count = -EINVAL;
690 goto EXIT;
691 }
692 if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) {
693 dev_err(&client->dev,
694 "Configuration register write error, aborting.\n");
695 count = -EIO;
696 }
697EXIT:
698 mutex_unlock(&data->update_lock);
699 return count;
700}
701
702
703
704static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
705 get_temp, NULL, IDX_TEMP1_INPUT);
706static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, get_temp,
707 set_temp, IDX_TEMP1_MIN);
708static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, get_temp,
709 set_temp, IDX_TEMP1_MAX);
710static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, get_temp,
711 set_temp, IDX_TEMP1_CRIT);
712static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
713 get_temp_alarm, NULL, IDX_TEMP1_MIN);
714static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
715 get_temp_alarm, NULL, IDX_TEMP1_MAX);
716static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
717 get_temp_alarm, NULL, IDX_TEMP1_CRIT);
718static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
719 get_temp, NULL, IDX_TEMP2_INPUT);
720static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
721 set_temp, IDX_TEMP2_MIN);
722static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, get_temp,
723 set_temp, IDX_TEMP2_MAX);
724static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, get_temp,
725 set_temp, IDX_TEMP2_CRIT);
726static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO,
727 get_temp2_fault, NULL, 0);
728static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO,
729 get_temp_alarm, NULL, IDX_TEMP2_MIN);
730static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO,
731 get_temp_alarm, NULL, IDX_TEMP2_MAX);
732static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO,
733 get_temp_alarm, NULL, IDX_TEMP2_CRIT);
734static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, IDX_FAN1_INPUT);
735static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR,
736 get_fan, set_fan, IDX_FAN1_MIN);
737static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO | S_IWUSR,
738 get_fan, set_fan, IDX_FAN1_MAX);
739static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan1_fault, NULL, 0);
740static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
741 get_fan1_div, set_fan1_div, 0);
742
743static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm1, set_pwm1, 0);
744static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
745 get_pwm1_enable, set_pwm1_enable, 0);
746static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO,
747 get_pwm1_auto_point_pwm, NULL, 0);
748static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
749 get_pwm1_auto_point_pwm, set_pwm1_auto_point_pwm, 1);
750static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO,
751 get_pwm1_auto_point_pwm, NULL, 2);
752static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO,
753 get_pwm1_auto_channels_temp, NULL, 0);
754static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO,
755 get_temp_auto_point_temp, NULL, 1, 0);
756static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
757 get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 1);
758static SENSOR_DEVICE_ATTR_2(temp1_auto_point3_temp, S_IWUSR | S_IRUGO,
759 get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 2);
760
761static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
762 get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 0);
763static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
764 get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 1);
765static SENSOR_DEVICE_ATTR_2(temp2_auto_point3_temp, S_IWUSR | S_IRUGO,
766 get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 2);
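/*
 * Added commentary (not part of the original driver source): as set up in
 * amc6821_update_device() below, the three PWM auto points describe the fan
 * ramp: point 1 is fixed at 0% duty, point 2 is the programmable
 * low-temperature duty cycle (AMC6821_REG_DCY_LOW_TEMP), and point 3 is
 * full duty (255).
 */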
767
768
769
770static struct attribute *amc6821_attrs[] = {
771 &sensor_dev_attr_temp1_input.dev_attr.attr,
772 &sensor_dev_attr_temp1_min.dev_attr.attr,
773 &sensor_dev_attr_temp1_max.dev_attr.attr,
774 &sensor_dev_attr_temp1_crit.dev_attr.attr,
775 &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
776 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
777 &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
778 &sensor_dev_attr_temp2_input.dev_attr.attr,
779 &sensor_dev_attr_temp2_min.dev_attr.attr,
780 &sensor_dev_attr_temp2_max.dev_attr.attr,
781 &sensor_dev_attr_temp2_crit.dev_attr.attr,
782 &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
783 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
784 &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
785 &sensor_dev_attr_temp2_fault.dev_attr.attr,
786 &sensor_dev_attr_fan1_input.dev_attr.attr,
787 &sensor_dev_attr_fan1_min.dev_attr.attr,
788 &sensor_dev_attr_fan1_max.dev_attr.attr,
789 &sensor_dev_attr_fan1_fault.dev_attr.attr,
790 &sensor_dev_attr_fan1_div.dev_attr.attr,
791 &sensor_dev_attr_pwm1.dev_attr.attr,
792 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
793 &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
794 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
795 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
796 &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
797 &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
798 &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
799 &sensor_dev_attr_temp1_auto_point3_temp.dev_attr.attr,
800 &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
801 &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
802 &sensor_dev_attr_temp2_auto_point3_temp.dev_attr.attr,
803 NULL
804};
805
806static struct attribute_group amc6821_attr_grp = {
807 .attrs = amc6821_attrs,
808};
809
810
811
812/* Return 0 if detection is successful, -ENODEV otherwise */
813static int amc6821_detect(
814 struct i2c_client *client,
815 struct i2c_board_info *info)
816{
817 struct i2c_adapter *adapter = client->adapter;
818 int address = client->addr;
819 int dev_id, comp_id;
820
821 dev_dbg(&adapter->dev, "amc6821_detect called.\n");
822
823 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
824 dev_dbg(&adapter->dev,
825 "amc6821: I2C bus doesn't support byte mode, "
826 "skipping.\n");
827 return -ENODEV;
828 }
829
830 dev_id = i2c_smbus_read_byte_data(client, AMC6821_REG_DEV_ID);
831 comp_id = i2c_smbus_read_byte_data(client, AMC6821_REG_COMP_ID);
832 if (dev_id != 0x21 || comp_id != 0x49) {
833 dev_dbg(&adapter->dev,
834 "amc6821: detection failed at 0x%02x.\n",
835 address);
836 return -ENODEV;
837 }
838
839 /* Bit 7 of the address register is ignored, so we can check the
840 ID registers again */
841 dev_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_DEV_ID);
842 comp_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_COMP_ID);
843 if (dev_id != 0x21 || comp_id != 0x49) {
844 dev_dbg(&adapter->dev,
845 "amc6821: detection failed at 0x%02x.\n",
846 address);
847 return -ENODEV;
848 }
849
850 dev_info(&adapter->dev, "amc6821: chip found at 0x%02x.\n", address);
851 strlcpy(info->type, "amc6821", I2C_NAME_SIZE);
852
853 return 0;
854}
855
856static int amc6821_probe(
857 struct i2c_client *client,
858 const struct i2c_device_id *id)
859{
860 struct amc6821_data *data;
861 int err;
862
863 data = kzalloc(sizeof(struct amc6821_data), GFP_KERNEL);
864 if (!data) {
865 dev_err(&client->dev, "out of memory.\n");
866 return -ENOMEM;
867 }
868
869
870 i2c_set_clientdata(client, data);
871 mutex_init(&data->update_lock);
872
873 /*
874 * Initialize the amc6821 chip
875 */
876 err = amc6821_init_client(client);
877 if (err)
878 goto err_free;
879
880 err = sysfs_create_group(&client->dev.kobj, &amc6821_attr_grp);
881 if (err)
882 goto err_free;
883
884 data->hwmon_dev = hwmon_device_register(&client->dev);
885 if (!IS_ERR(data->hwmon_dev))
886 return 0;
887
888 err = PTR_ERR(data->hwmon_dev);
889 dev_err(&client->dev, "error registering hwmon device.\n");
890 sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp);
891err_free:
892 kfree(data);
893 return err;
894}
895
896static int amc6821_remove(struct i2c_client *client)
897{
898 struct amc6821_data *data = i2c_get_clientdata(client);
899
900 hwmon_device_unregister(data->hwmon_dev);
901 sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp);
902
903 kfree(data);
904
905 return 0;
906}
907
908
909static int amc6821_init_client(struct i2c_client *client)
910{
911 int config;
912 int err = -EIO;
913
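	/*
	 * Added commentary (not part of the original driver source): "init"
	 * and "pwminv" used below are presumably module parameters declared
	 * earlier in the file (outside this excerpt); when init is zero the
	 * chip registers are left exactly as the firmware configured them.
	 */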
914 if (init) {
915 config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
916
917 if (config < 0) {
918 dev_err(&client->dev,
919 "Error reading configuration register, aborting.\n");
920 return err;
921 }
922
923 config |= AMC6821_CONF4_MODE;
924
925 if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4,
926 config)) {
927 dev_err(&client->dev,
928 "Configuration register write error, aborting.\n");
929 return err;
930 }
931
932 config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF3);
933
934 if (config < 0) {
935 dev_err(&client->dev,
936 "Error reading configuration register, aborting.\n");
937 return err;
938 }
939
940 dev_info(&client->dev, "Revision %d\n", config & 0x0f);
941
942 config &= ~AMC6821_CONF3_THERM_FAN_EN;
943
944 if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF3,
945 config)) {
946 dev_err(&client->dev,
947 "Configuration register write error, aborting.\n");
948 return err;
949 }
950
951 config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF2);
952
953 if (config < 0) {
954 dev_err(&client->dev,
955 "Error reading configuration register, aborting.\n");
956 return err;
957 }
958
959 config &= ~AMC6821_CONF2_RTFIE;
960 config &= ~AMC6821_CONF2_LTOIE;
961 config &= ~AMC6821_CONF2_RTOIE;
962 if (i2c_smbus_write_byte_data(client,
963 AMC6821_REG_CONF2, config)) {
964 dev_err(&client->dev,
965 "Configuration register write error, aborting.\n");
966 return err;
967 }
968
969 config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
970
971 if (config < 0) {
972 dev_err(&client->dev,
973 "Error reading configuration register, aborting.\n");
974 return err;
975 }
976
977 config &= ~AMC6821_CONF1_THERMOVIE;
978 config &= ~AMC6821_CONF1_FANIE;
979 config |= AMC6821_CONF1_START;
980 if (pwminv)
981 config |= AMC6821_CONF1_PWMINV;
982 else
983 config &= ~AMC6821_CONF1_PWMINV;
984
985 if (i2c_smbus_write_byte_data(
986 client, AMC6821_REG_CONF1, config)) {
987 dev_err(&client->dev,
988 "Configuration register write error, aborting.\n");
989 return err;
990 }
991 }
992 return 0;
993}
994
995
996static struct amc6821_data *amc6821_update_device(struct device *dev)
997{
998 struct i2c_client *client = to_i2c_client(dev);
999 struct amc6821_data *data = i2c_get_clientdata(client);
1000 int timeout = HZ;
1001 u8 reg;
1002 int i;
1003
1004 mutex_lock(&data->update_lock);
1005
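	/*
	 * Added commentary (not part of the original driver source): the
	 * cached register values are refreshed at most once per second
	 * (timeout = HZ jiffies) or when they have not been read yet.
	 */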
1006 if (time_after(jiffies, data->last_updated + timeout) ||
1007 !data->valid) {
1008
1009 for (i = 0; i < TEMP_IDX_LEN; i++)
1010 data->temp[i] = i2c_smbus_read_byte_data(client,
1011 temp_reg[i]);
1012
1013 data->stat1 = i2c_smbus_read_byte_data(client,
1014 AMC6821_REG_STAT1);
1015 data->stat2 = i2c_smbus_read_byte_data(client,
1016 AMC6821_REG_STAT2);
1017
1018 data->pwm1 = i2c_smbus_read_byte_data(client,
1019 AMC6821_REG_DCY);
1020 for (i = 0; i < FAN1_IDX_LEN; i++) {
1021 data->fan[i] = i2c_smbus_read_byte_data(
1022 client,
1023 fan_reg_low[i]);
1024 data->fan[i] += i2c_smbus_read_byte_data(
1025 client,
1026 fan_reg_hi[i]) << 8;
1027 }
1028 data->fan1_div = i2c_smbus_read_byte_data(client,
1029 AMC6821_REG_CONF4);
1030 data->fan1_div = data->fan1_div & AMC6821_CONF4_PSPR ? 4 : 2;
1031
1032 data->pwm1_auto_point_pwm[0] = 0;
1033 data->pwm1_auto_point_pwm[2] = 255;
1034 data->pwm1_auto_point_pwm[1] = i2c_smbus_read_byte_data(client,
1035 AMC6821_REG_DCY_LOW_TEMP);
1036
1037 data->temp1_auto_point_temp[0] =
1038 i2c_smbus_read_byte_data(client,
1039 AMC6821_REG_PSV_TEMP);
1040 data->temp2_auto_point_temp[0] =
1041 data->temp1_auto_point_temp[0];
1042 reg = i2c_smbus_read_byte_data(client,
1043 AMC6821_REG_LTEMP_FAN_CTRL);
1044 data->temp1_auto_point_temp[1] = (reg & 0xF8) >> 1;
1045 reg &= 0x07;
1046 reg = 0x20 >> reg;
1047 if (reg > 0)
1048 data->temp1_auto_point_temp[2] =
1049 data->temp1_auto_point_temp[1] +
1050 (data->pwm1_auto_point_pwm[2] -
1051 data->pwm1_auto_point_pwm[1]) / reg;
1052 else
1053 data->temp1_auto_point_temp[2] = 255;
1054
1055 reg = i2c_smbus_read_byte_data(client,
1056 AMC6821_REG_RTEMP_FAN_CTRL);
1057 data->temp2_auto_point_temp[1] = (reg & 0xF8) >> 1;
1058 reg &= 0x07;
1059 reg = 0x20 >> reg;
1060 if (reg > 0)
1061 data->temp2_auto_point_temp[2] =
1062 data->temp2_auto_point_temp[1] +
1063 (data->pwm1_auto_point_pwm[2] -
1064 data->pwm1_auto_point_pwm[1]) / reg;
1065 else
1066 data->temp2_auto_point_temp[2] = 255;
1067
1068 reg = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
1069 reg = (reg >> 5) & 0x3;
1070 switch (reg) {
1071 case 0: /*open loop: software sets pwm1*/
1072 data->pwm1_auto_channels_temp = 0;
1073 data->pwm1_enable = 1;
1074 break;
1075 case 2: /*closed loop: remote T (temp2)*/
1076 data->pwm1_auto_channels_temp = 2;
1077 data->pwm1_enable = 2;
1078 break;
1079 case 3: /*closed loop: local and remote T (temp2)*/
1080 data->pwm1_auto_channels_temp = 3;
1081 data->pwm1_enable = 3;
1082 break;
1083 case 1: /*semi-open loop: software sets rpm, chip controls pwm1,
1084 *currently not implemented
1085 */
1086 data->pwm1_auto_channels_temp = 0;
1087 data->pwm1_enable = 0;
1088 break;
1089 }
1090
1091 data->last_updated = jiffies;
1092 data->valid = 1;
1093 }
1094 mutex_unlock(&data->update_lock);
1095 return data;
1096}
1097
1098
1099static int __init amc6821_init(void)
1100{
1101 return i2c_add_driver(&amc6821_driver);
1102}
1103
1104static void __exit amc6821_exit(void)
1105{
1106 i2c_del_driver(&amc6821_driver);
1107}
1108
1109module_init(amc6821_init);
1110module_exit(amc6821_exit);
1111
1112
1113MODULE_LICENSE("GPL");
1114MODULE_AUTHOR("T. Mertelj <tomaz.mertelj@guest.arnes.si>");
1115MODULE_DESCRIPTION("Texas Instruments amc6821 hwmon driver");
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 5a3ee00c0e7d..028284f544e3 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -5,6 +5,7 @@
5 * See COPYING in the top level directory of the kernel tree. 5 * See COPYING in the top level directory of the kernel tree.
6 */ 6 */
7 7
8#include <linux/debugfs.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/hwmon.h> 10#include <linux/hwmon.h>
10#include <linux/list.h> 11#include <linux/list.h>
@@ -101,6 +102,11 @@ struct atk_data {
101 int temperature_count; 102 int temperature_count;
102 int fan_count; 103 int fan_count;
103 struct list_head sensor_list; 104 struct list_head sensor_list;
105
106 struct {
107 struct dentry *root;
108 u32 id;
109 } debugfs;
104}; 110};
105 111
106 112
@@ -624,6 +630,187 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
624 return err; 630 return err;
625} 631}
626 632
633#ifdef CONFIG_DEBUG_FS
634static int atk_debugfs_gitm_get(void *p, u64 *val)
635{
636 struct atk_data *data = p;
637 union acpi_object *ret;
638 struct atk_acpi_ret_buffer *buf;
639 int err = 0;
640
641 if (!data->read_handle)
642 return -ENODEV;
643
644 if (!data->debugfs.id)
645 return -EINVAL;
646
647 ret = atk_gitm(data, data->debugfs.id);
648 if (IS_ERR(ret))
649 return PTR_ERR(ret);
650
651 buf = (struct atk_acpi_ret_buffer *)ret->buffer.pointer;
652 if (buf->flags)
653 *val = buf->value;
654 else
655 err = -EIO;
656
657 return err;
658}
659
660DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm,
661 atk_debugfs_gitm_get,
662 NULL,
663 "0x%08llx\n")
664
665static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj)
666{
667 int ret = 0;
668
669 switch (obj->type) {
670 case ACPI_TYPE_INTEGER:
671 ret = snprintf(buf, sz, "0x%08llx\n", obj->integer.value);
672 break;
673 case ACPI_TYPE_STRING:
674 ret = snprintf(buf, sz, "%s\n", obj->string.pointer);
675 break;
676 }
677
678 return ret;
679}
680
681static void atk_pack_print(char *buf, size_t sz, union acpi_object *pack)
682{
683 int ret;
684 int i;
685
686 for (i = 0; i < pack->package.count; i++) {
687 union acpi_object *obj = &pack->package.elements[i];
688
689 ret = atk_acpi_print(buf, sz, obj);
690 if (ret >= sz)
691 break;
692 buf += ret;
693 sz -= ret;
694 }
695}
696
697static int atk_debugfs_ggrp_open(struct inode *inode, struct file *file)
698{
699 struct atk_data *data = inode->i_private;
700 char *buf = NULL;
701 union acpi_object *ret;
702 u8 cls;
703 int i;
704
705 if (!data->enumerate_handle)
706 return -ENODEV;
707 if (!data->debugfs.id)
708 return -EINVAL;
709
710 cls = (data->debugfs.id & 0xff000000) >> 24;
711 ret = atk_ggrp(data, cls);
712 if (IS_ERR(ret))
713 return PTR_ERR(ret);
714
715 for (i = 0; i < ret->package.count; i++) {
716 union acpi_object *pack = &ret->package.elements[i];
717 union acpi_object *id;
718
719 if (pack->type != ACPI_TYPE_PACKAGE)
720 continue;
721 if (!pack->package.count)
722 continue;
723 id = &pack->package.elements[0];
724 if (id->integer.value == data->debugfs.id) {
725 /* Print the package */
726 buf = kzalloc(512, GFP_KERNEL);
727 if (!buf) {
728 ACPI_FREE(ret);
729 return -ENOMEM;
730 }
731 atk_pack_print(buf, 512, pack);
732 break;
733 }
734 }
735 ACPI_FREE(ret);
736
737 if (!buf)
738 return -EINVAL;
739
740 file->private_data = buf;
741
742 return nonseekable_open(inode, file);
743}
744
745static ssize_t atk_debugfs_ggrp_read(struct file *file, char __user *buf,
746 size_t count, loff_t *pos)
747{
748 char *str = file->private_data;
749 size_t len = strlen(str);
750
751 return simple_read_from_buffer(buf, count, pos, str, len);
752}
753
754static int atk_debugfs_ggrp_release(struct inode *inode, struct file *file)
755{
756 kfree(file->private_data);
757 return 0;
758}
759
760static const struct file_operations atk_debugfs_ggrp_fops = {
761 .read = atk_debugfs_ggrp_read,
762 .open = atk_debugfs_ggrp_open,
763 .release = atk_debugfs_ggrp_release,
764};
765
766static void atk_debugfs_init(struct atk_data *data)
767{
768 struct dentry *d;
769 struct dentry *f;
770
771 data->debugfs.id = 0;
772
773 d = debugfs_create_dir("asus_atk0110", NULL);
774 if (!d || IS_ERR(d))
775 return;
776
777 f = debugfs_create_x32("id", S_IRUSR | S_IWUSR, d, &data->debugfs.id);
778 if (!f || IS_ERR(f))
779 goto cleanup;
780
781 f = debugfs_create_file("gitm", S_IRUSR, d, data,
782 &atk_debugfs_gitm);
783 if (!f || IS_ERR(f))
784 goto cleanup;
785
786 f = debugfs_create_file("ggrp", S_IRUSR, d, data,
787 &atk_debugfs_ggrp_fops);
788 if (!f || IS_ERR(f))
789 goto cleanup;
790
791 data->debugfs.root = d;
792
793 return;
794cleanup:
795 debugfs_remove_recursive(d);
796}
797
798static void atk_debugfs_cleanup(struct atk_data *data)
799{
800 debugfs_remove_recursive(data->debugfs.root);
801}
802
803#else /* CONFIG_DEBUG_FS */
804
805static void atk_debugfs_init(struct atk_data *data)
806{
807}
808
809static void atk_debugfs_cleanup(struct atk_data *data)
810{
811}
812#endif
813
627static int atk_add_sensor(struct atk_data *data, union acpi_object *obj) 814static int atk_add_sensor(struct atk_data *data, union acpi_object *obj)
628{ 815{
629 struct device *dev = &data->acpi_dev->dev; 816 struct device *dev = &data->acpi_dev->dev;
@@ -1047,76 +1234,75 @@ remove:
1047 return err; 1234 return err;
1048} 1235}
1049 1236
1050static int atk_check_old_if(struct atk_data *data) 1237static int atk_probe_if(struct atk_data *data)
1051{ 1238{
1052 struct device *dev = &data->acpi_dev->dev; 1239 struct device *dev = &data->acpi_dev->dev;
1053 acpi_handle ret; 1240 acpi_handle ret;
1054 acpi_status status; 1241 acpi_status status;
1242 int err = 0;
1055 1243
1056 /* RTMP: read temperature */ 1244 /* RTMP: read temperature */
1057 status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_TMP, &ret); 1245 status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_TMP, &ret);
1058 if (status != AE_OK) { 1246 if (ACPI_SUCCESS(status))
1247 data->rtmp_handle = ret;
1248 else
1059 dev_dbg(dev, "method " METHOD_OLD_READ_TMP " not found: %s\n", 1249 dev_dbg(dev, "method " METHOD_OLD_READ_TMP " not found: %s\n",
1060 acpi_format_exception(status)); 1250 acpi_format_exception(status));
1061 return -ENODEV;
1062 }
1063 data->rtmp_handle = ret;
1064 1251
1065 /* RVLT: read voltage */ 1252 /* RVLT: read voltage */
1066 status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_VLT, &ret); 1253 status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_VLT, &ret);
1067 if (status != AE_OK) { 1254 if (ACPI_SUCCESS(status))
1255 data->rvlt_handle = ret;
1256 else
1068 dev_dbg(dev, "method " METHOD_OLD_READ_VLT " not found: %s\n", 1257 dev_dbg(dev, "method " METHOD_OLD_READ_VLT " not found: %s\n",
1069 acpi_format_exception(status)); 1258 acpi_format_exception(status));
1070 return -ENODEV;
1071 }
1072 data->rvlt_handle = ret;
1073 1259
1074 /* RFAN: read fan status */ 1260 /* RFAN: read fan status */
1075 status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_FAN, &ret); 1261 status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_FAN, &ret);
1076 if (status != AE_OK) { 1262 if (ACPI_SUCCESS(status))
1263 data->rfan_handle = ret;
1264 else
1077 dev_dbg(dev, "method " METHOD_OLD_READ_FAN " not found: %s\n", 1265 dev_dbg(dev, "method " METHOD_OLD_READ_FAN " not found: %s\n",
1078 acpi_format_exception(status)); 1266 acpi_format_exception(status));
1079 return -ENODEV;
1080 }
1081 data->rfan_handle = ret;
1082
1083 return 0;
1084}
1085
1086static int atk_check_new_if(struct atk_data *data)
1087{
1088 struct device *dev = &data->acpi_dev->dev;
1089 acpi_handle ret;
1090 acpi_status status;
1091 1267
1092 /* Enumeration */ 1268 /* Enumeration */
1093 status = acpi_get_handle(data->atk_handle, METHOD_ENUMERATE, &ret); 1269 status = acpi_get_handle(data->atk_handle, METHOD_ENUMERATE, &ret);
1094 if (status != AE_OK) { 1270 if (ACPI_SUCCESS(status))
1271 data->enumerate_handle = ret;
1272 else
1095 dev_dbg(dev, "method " METHOD_ENUMERATE " not found: %s\n", 1273 dev_dbg(dev, "method " METHOD_ENUMERATE " not found: %s\n",
1096 acpi_format_exception(status)); 1274 acpi_format_exception(status));
1097 return -ENODEV;
1098 }
1099 data->enumerate_handle = ret;
1100 1275
1101 /* De-multiplexer (read) */ 1276 /* De-multiplexer (read) */
1102 status = acpi_get_handle(data->atk_handle, METHOD_READ, &ret); 1277 status = acpi_get_handle(data->atk_handle, METHOD_READ, &ret);
1103 if (status != AE_OK) { 1278 if (ACPI_SUCCESS(status))
1279 data->read_handle = ret;
1280 else
1104 dev_dbg(dev, "method " METHOD_READ " not found: %s\n", 1281 dev_dbg(dev, "method " METHOD_READ " not found: %s\n",
1105 acpi_format_exception(status)); 1282 acpi_format_exception(status));
1106 return -ENODEV;
1107 }
1108 data->read_handle = ret;
1109 1283
1110 /* De-multiplexer (write) */ 1284 /* De-multiplexer (write) */
1111 status = acpi_get_handle(data->atk_handle, METHOD_WRITE, &ret); 1285 status = acpi_get_handle(data->atk_handle, METHOD_WRITE, &ret);
1112 if (status != AE_OK) { 1286 if (ACPI_SUCCESS(status))
1113 dev_dbg(dev, "method " METHOD_READ " not found: %s\n", 1287 data->write_handle = ret;
1288 else
1289 dev_dbg(dev, "method " METHOD_WRITE " not found: %s\n",
1114 acpi_format_exception(status)); 1290 acpi_format_exception(status));
1115 return -ENODEV;
1116 }
1117 data->write_handle = ret;
1118 1291
1119 return 0; 1292 /* Check for hwmon methods: first check "old" style methods; note that
1293 * both may be present: in this case we stick to the old interface;
1294 * analysis of multiple DSDTs indicates that when both interfaces
1295 * are present the new one (GGRP/GITM) is not functional.
1296 */
1297 if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle)
1298 data->old_interface = true;
1299 else if (data->enumerate_handle && data->read_handle &&
1300 data->write_handle)
1301 data->old_interface = false;
1302 else
1303 err = -ENODEV;
1304
1305 return err;
1120} 1306}
1121 1307
1122static int atk_add(struct acpi_device *device) 1308static int atk_add(struct acpi_device *device)
@@ -1143,40 +1329,30 @@ static int atk_add(struct acpi_device *device)
1143 &buf, ACPI_TYPE_PACKAGE); 1329 &buf, ACPI_TYPE_PACKAGE);
1144 if (ret != AE_OK) { 1330 if (ret != AE_OK) {
1145 dev_dbg(&device->dev, "atk: method MBIF not found\n"); 1331 dev_dbg(&device->dev, "atk: method MBIF not found\n");
1146 err = -ENODEV; 1332 } else {
1147 goto out; 1333 obj = buf.pointer;
1334 if (obj->package.count >= 2) {
1335 union acpi_object *id = &obj->package.elements[1];
1336 if (id->type == ACPI_TYPE_STRING)
1337 dev_dbg(&device->dev, "board ID = %s\n",
1338 id->string.pointer);
1339 }
1340 ACPI_FREE(buf.pointer);
1148 } 1341 }
1149 1342
1150 obj = buf.pointer; 1343 err = atk_probe_if(data);
1151 if (obj->package.count >= 2 && 1344 if (err) {
1152 obj->package.elements[1].type == ACPI_TYPE_STRING) { 1345 dev_err(&device->dev, "No usable hwmon interface detected\n");
1153 dev_dbg(&device->dev, "board ID = %s\n", 1346 goto out;
1154 obj->package.elements[1].string.pointer);
1155 } 1347 }
1156 ACPI_FREE(buf.pointer);
1157 1348
1158 /* Check for hwmon methods: first check "old" style methods; note that 1349 if (data->old_interface) {
1159 * both may be present: in this case we stick to the old interface;
1160 * analysis of multiple DSDTs indicates that when both interfaces
1161 * are present the new one (GGRP/GITM) is not functional.
1162 */
1163 err = atk_check_old_if(data);
1164 if (!err) {
1165 dev_dbg(&device->dev, "Using old hwmon interface\n"); 1350 dev_dbg(&device->dev, "Using old hwmon interface\n");
1166 data->old_interface = true; 1351 err = atk_enumerate_old_hwmon(data);
1167 } else { 1352 } else {
1168 err = atk_check_new_if(data);
1169 if (err)
1170 goto out;
1171
1172 dev_dbg(&device->dev, "Using new hwmon interface\n"); 1353 dev_dbg(&device->dev, "Using new hwmon interface\n");
1173 data->old_interface = false;
1174 }
1175
1176 if (data->old_interface)
1177 err = atk_enumerate_old_hwmon(data);
1178 else
1179 err = atk_enumerate_new_hwmon(data); 1354 err = atk_enumerate_new_hwmon(data);
1355 }
1180 if (err < 0) 1356 if (err < 0)
1181 goto out; 1357 goto out;
1182 if (err == 0) { 1358 if (err == 0) {
@@ -1190,6 +1366,8 @@ static int atk_add(struct acpi_device *device)
1190 if (err) 1366 if (err)
1191 goto cleanup; 1367 goto cleanup;
1192 1368
1369 atk_debugfs_init(data);
1370
1193 device->driver_data = data; 1371 device->driver_data = data;
1194 return 0; 1372 return 0;
1195cleanup: 1373cleanup:
@@ -1208,6 +1386,8 @@ static int atk_remove(struct acpi_device *device, int type)
1208 1386
1209 device->driver_data = NULL; 1387 device->driver_data = NULL;
1210 1388
1389 atk_debugfs_cleanup(data);
1390
1211 atk_remove_files(data); 1391 atk_remove_files(data);
1212 atk_free_sensors(data); 1392 atk_free_sensors(data);
1213 hwmon_device_unregister(data->hwmon_dev); 1393 hwmon_device_unregister(data->hwmon_dev);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index caef39cda8c8..2d7bceeed0bc 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -33,6 +33,7 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/pci.h>
36#include <asm/msr.h> 37#include <asm/msr.h>
37#include <asm/processor.h> 38#include <asm/processor.h>
38 39
@@ -161,6 +162,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
161 int usemsr_ee = 1; 162 int usemsr_ee = 1;
162 int err; 163 int err;
163 u32 eax, edx; 164 u32 eax, edx;
165 struct pci_dev *host_bridge;
164 166
165 /* Early chips have no MSR for TjMax */ 167 /* Early chips have no MSR for TjMax */
166 168
@@ -168,11 +170,21 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
168 usemsr_ee = 0; 170 usemsr_ee = 0;
169 } 171 }
170 172
171 /* Atoms seems to have TjMax at 90C */ 173 /* Atom CPUs */
172 174
173 if (c->x86_model == 0x1c) { 175 if (c->x86_model == 0x1c) {
174 usemsr_ee = 0; 176 usemsr_ee = 0;
175 tjmax = 90000; 177
178 host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
179
180 if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
181 && (host_bridge->device == 0xa000 /* NM10 based nettop */
182 || host_bridge->device == 0xa010)) /* NM10 based netbook */
183 tjmax = 100000;
184 else
185 tjmax = 90000;
186
187 pci_dev_put(host_bridge);
176 } 188 }
177 189
178 if ((c->x86_model > 0xe) && (usemsr_ee)) { 190 if ((c->x86_model > 0xe) && (usemsr_ee)) {
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index bd0fc67e804b..fa0728232e71 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -768,6 +768,7 @@ leave:
768static int watchdog_open(struct inode *inode, struct file *filp) 768static int watchdog_open(struct inode *inode, struct file *filp)
769{ 769{
770 struct fschmd_data *pos, *data = NULL; 770 struct fschmd_data *pos, *data = NULL;
771 int watchdog_is_open;
771 772
772 /* We get called from drivers/char/misc.c with misc_mtx hold, and we 773 /* We get called from drivers/char/misc.c with misc_mtx hold, and we
773 call misc_register() from fschmd_probe() with watchdog_data_mutex 774 call misc_register() from fschmd_probe() with watchdog_data_mutex
@@ -782,10 +783,12 @@ static int watchdog_open(struct inode *inode, struct file *filp)
782 } 783 }
783 } 784 }
784 /* Note we can never not have found data, so we don't check for this */ 785 /* Note we can never not have found data, so we don't check for this */
785 kref_get(&data->kref); 786 watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
787 if (!watchdog_is_open)
788 kref_get(&data->kref);
786 mutex_unlock(&watchdog_data_mutex); 789 mutex_unlock(&watchdog_data_mutex);
787 790
788 if (test_and_set_bit(0, &data->watchdog_is_open)) 791 if (watchdog_is_open)
789 return -EBUSY; 792 return -EBUSY;
790 793
791 /* Start the watchdog */ 794 /* Start the watchdog */
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d8a26d16d948..099a2138cdf6 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -33,6 +33,16 @@ static bool force;
33module_param(force, bool, 0444); 33module_param(force, bool, 0444);
34MODULE_PARM_DESC(force, "force loading on processors with erratum 319"); 34MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
35 35
36/* CPUID function 0x80000001, ebx */
37#define CPUID_PKGTYPE_MASK 0xf0000000
38#define CPUID_PKGTYPE_F 0x00000000
39#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000
40
41/* DRAM controller (PCI function 2) */
42#define REG_DCT0_CONFIG_HIGH 0x094
43#define DDR3_MODE 0x00000100
44
45/* miscellaneous (PCI function 3) */
36#define REG_HARDWARE_THERMAL_CONTROL 0x64 46#define REG_HARDWARE_THERMAL_CONTROL 0x64
37#define HTC_ENABLE 0x00000001 47#define HTC_ENABLE 0x00000001
38 48
@@ -85,13 +95,28 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
85static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1); 95static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
86static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 96static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
87 97
88static bool __devinit has_erratum_319(void) 98static bool __devinit has_erratum_319(struct pci_dev *pdev)
89{ 99{
100 u32 pkg_type, reg_dram_cfg;
101
102 if (boot_cpu_data.x86 != 0x10)
103 return false;
104
90 /* 105 /*
91 * Erratum 319: The thermal sensor of older Family 10h processors 106 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
92 * (B steppings) may be unreliable. 107 * may be unreliable.
93 */ 108 */
94 return boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model <= 2; 109 pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
110 if (pkg_type == CPUID_PKGTYPE_F)
111 return true;
112 if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
113 return false;
114
115 /* Differentiate between AM2+ (bad) and AM3 (good) */
116 pci_bus_read_config_dword(pdev->bus,
117 PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
118 REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
119 return !(reg_dram_cfg & DDR3_MODE);
95} 120}
96 121
97static int __devinit k10temp_probe(struct pci_dev *pdev, 122static int __devinit k10temp_probe(struct pci_dev *pdev,
@@ -99,9 +124,10 @@ static int __devinit k10temp_probe(struct pci_dev *pdev,
99{ 124{
100 struct device *hwmon_dev; 125 struct device *hwmon_dev;
101 u32 reg_caps, reg_htc; 126 u32 reg_caps, reg_htc;
127 int unreliable = has_erratum_319(pdev);
102 int err; 128 int err;
103 129
104 if (has_erratum_319() && !force) { 130 if (unreliable && !force) {
105 dev_err(&pdev->dev, 131 dev_err(&pdev->dev,
106 "unreliable CPU thermal sensor; monitoring disabled\n"); 132 "unreliable CPU thermal sensor; monitoring disabled\n");
107 err = -ENODEV; 133 err = -ENODEV;
@@ -139,7 +165,7 @@ static int __devinit k10temp_probe(struct pci_dev *pdev,
139 } 165 }
140 dev_set_drvdata(&pdev->dev, hwmon_dev); 166 dev_set_drvdata(&pdev->dev, hwmon_dev);
141 167
142 if (has_erratum_319() && force) 168 if (unreliable && force)
143 dev_warn(&pdev->dev, 169 dev_warn(&pdev->dev,
144 "unreliable CPU thermal sensor; check erratum 319\n"); 170 "unreliable CPU thermal sensor; check erratum 319\n");
145 return 0; 171 return 0;
@@ -169,7 +195,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
169 dev_set_drvdata(&pdev->dev, NULL); 195 dev_set_drvdata(&pdev->dev, NULL);
170} 196}
171 197
172static struct pci_device_id k10temp_id_table[] = { 198static const struct pci_device_id k10temp_id_table[] = {
173 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 199 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
174 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, 200 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
175 {} 201 {}
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 1fe995111841..0ceb6d6200a3 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -136,7 +136,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
136static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1); 136static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
137static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 137static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
138 138
139static struct pci_device_id k8temp_ids[] = { 139static const struct pci_device_id k8temp_ids[] = {
140 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, 140 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
141 { 0 }, 141 { 0 },
142}; 142};
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index cadcbd90ff3b..72ff2c4e757d 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
851static int __init lm78_isa_found(unsigned short address) 851static int __init lm78_isa_found(unsigned short address)
852{ 852{
853 int val, save, found = 0; 853 int val, save, found = 0;
854 854 int port;
855 /* We have to request the region in two parts because some 855
856 boards declare base+4 to base+7 as a PNP device */ 856 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
857 if (!request_region(address, 4, "lm78")) { 857 * to base+7 and some base+5 to base+6. So we better request each port
858 pr_debug("lm78: Failed to request low part of region\n"); 858 * individually for the probing phase. */
859 return 0; 859 for (port = address; port < address + LM78_EXTENT; port++) {
860 } 860 if (!request_region(port, 1, "lm78")) {
861 if (!request_region(address + 4, 4, "lm78")) { 861 pr_debug("lm78: Failed to request port 0x%x\n", port);
862 pr_debug("lm78: Failed to request high part of region\n"); 862 goto release;
863 release_region(address, 4); 863 }
864 return 0;
865 } 864 }
866 865
867#define REALLY_SLOW_IO 866#define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
925 val & 0x80 ? "LM79" : "LM78", (int)address); 924 val & 0x80 ? "LM79" : "LM78", (int)address);
926 925
927 release: 926 release:
928 release_region(address + 4, 4); 927 for (port--; port >= address; port--)
929 release_region(address, 4); 928 release_region(port, 1);
930 return found; 929 return found;
931} 930}
932 931
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 12f2e7086560..79c2931e3008 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -697,7 +697,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
697 return data; 697 return data;
698} 698}
699 699
700static struct pci_device_id sis5595_pci_ids[] = { 700static const struct pci_device_id sis5595_pci_ids[] = {
701 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, 701 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
702 { 0, } 702 { 0, }
703}; 703};
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 9ca97818bd4b..8fa462f2b570 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -488,7 +488,7 @@ static int __init smsc47m1_find(unsigned short *addr,
488} 488}
489 489
490/* Restore device to its initial state */ 490/* Restore device to its initial state */
491static void __init smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) 491static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
492{ 492{
493 if ((sio_data->activate & 0x01) == 0) { 493 if ((sio_data->activate & 0x01) == 0) {
494 superio_enter(); 494 superio_enter();
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 39e82a492f26..f397ce7ad598 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -767,7 +767,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
767 return data; 767 return data;
768} 768}
769 769
770static struct pci_device_id via686a_pci_ids[] = { 770static const struct pci_device_id via686a_pci_ids[] = {
771 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, 771 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
772 { 0, } 772 { 0, }
773}; 773};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 470a1226ba2b..d47b4c9949c2 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -697,7 +697,7 @@ static struct platform_driver vt8231_driver = {
697 .remove = __devexit_p(vt8231_remove), 697 .remove = __devexit_p(vt8231_remove),
698}; 698};
699 699
700static struct pci_device_id vt8231_pci_ids[] = { 700static const struct pci_device_id vt8231_pci_ids[] = {
701 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, 701 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
702 { 0, } 702 { 0, }
703}; 703};
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 05f9225b6f94..32d4adee73db 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1793,17 +1793,17 @@ static int __init
1793w83781d_isa_found(unsigned short address) 1793w83781d_isa_found(unsigned short address)
1794{ 1794{
1795 int val, save, found = 0; 1795 int val, save, found = 0;
1796 1796 int port;
1797 /* We have to request the region in two parts because some 1797
1798 boards declare base+4 to base+7 as a PNP device */ 1798 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
1799 if (!request_region(address, 4, "w83781d")) { 1799 * to base+7 and some base+5 to base+6. So we better request each port
1800 pr_debug("w83781d: Failed to request low part of region\n"); 1800 * individually for the probing phase. */
1801 return 0; 1801 for (port = address; port < address + W83781D_EXTENT; port++) {
1802 } 1802 if (!request_region(port, 1, "w83781d")) {
1803 if (!request_region(address + 4, 4, "w83781d")) { 1803 pr_debug("w83781d: Failed to request port 0x%x\n",
1804 pr_debug("w83781d: Failed to request high part of region\n"); 1804 port);
1805 release_region(address, 4); 1805 goto release;
1806 return 0; 1806 }
1807 } 1807 }
1808 1808
1809#define REALLY_SLOW_IO 1809#define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
1877 val == 0x30 ? "W83782D" : "W83781D", (int)address); 1877 val == 0x30 ? "W83782D" : "W83781D", (int)address);
1878 1878
1879 release: 1879 release:
1880 release_region(address + 4, 4); 1880 for (port--; port >= address; port--)
1881 release_region(address, 4); 1881 release_region(port, 1);
1882 return found; 1882 return found;
1883} 1883}
1884 1884
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index f70f46582c6c..4687af40dd50 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -87,9 +87,9 @@ static int ali1563_transaction(struct i2c_adapter * a, int size)
87 outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2); 87 outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
88 88
89 timeout = ALI1563_MAX_TIMEOUT; 89 timeout = ALI1563_MAX_TIMEOUT;
90 do 90 do {
91 msleep(1); 91 msleep(1);
92 while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout); 92 } while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout);
93 93
94 dev_dbg(&a->dev, "Transaction (post): STS=%02x, CNTL1=%02x, " 94 dev_dbg(&a->dev, "Transaction (post): STS=%02x, CNTL1=%02x, "
95 "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", 95 "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
@@ -157,9 +157,9 @@ static int ali1563_block_start(struct i2c_adapter * a)
157 outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2); 157 outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
158 158
159 timeout = ALI1563_MAX_TIMEOUT; 159 timeout = ALI1563_MAX_TIMEOUT;
160 do 160 do {
161 msleep(1); 161 msleep(1);
162 while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout); 162 } while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout);
163 163
164 dev_dbg(&a->dev, "Block (post): STS=%02x, CNTL1=%02x, " 164 dev_dbg(&a->dev, "Block (post): STS=%02x, CNTL1=%02x, "
165 "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", 165 "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e3654d683e15..75bf820e7ccb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -226,7 +226,6 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
226 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 226 temp = readb(i2c_imx->base + IMX_I2C_I2CR);
227 temp &= ~(I2CR_MSTA | I2CR_MTX); 227 temp &= ~(I2CR_MSTA | I2CR_MTX);
228 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 228 writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
229 i2c_imx->stopped = 1;
230 } 229 }
231 if (cpu_is_mx1()) { 230 if (cpu_is_mx1()) {
232 /* 231 /*
@@ -236,8 +235,10 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
236 udelay(i2c_imx->disable_delay); 235 udelay(i2c_imx->disable_delay);
237 } 236 }
238 237
239 if (!i2c_imx->stopped) 238 if (!i2c_imx->stopped) {
240 i2c_imx_bus_busy(i2c_imx, 0); 239 i2c_imx_bus_busy(i2c_imx, 0);
240 i2c_imx->stopped = 1;
241 }
241 242
242 /* Disable I2C controller */ 243 /* Disable I2C controller */
243 writeb(0, i2c_imx->base + IMX_I2C_I2CR); 244 writeb(0, i2c_imx->base + IMX_I2C_I2CR);
@@ -496,22 +497,23 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
496 } 497 }
497 498
498 res_size = resource_size(res); 499 res_size = resource_size(res);
500
501 if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
502 ret = -EBUSY;
503 goto fail0;
504 }
505
499 base = ioremap(res->start, res_size); 506 base = ioremap(res->start, res_size);
500 if (!base) { 507 if (!base) {
501 dev_err(&pdev->dev, "ioremap failed\n"); 508 dev_err(&pdev->dev, "ioremap failed\n");
502 ret = -EIO; 509 ret = -EIO;
503 goto fail0; 510 goto fail1;
504 } 511 }
505 512
506 i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); 513 i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL);
507 if (!i2c_imx) { 514 if (!i2c_imx) {
508 dev_err(&pdev->dev, "can't allocate interface\n"); 515 dev_err(&pdev->dev, "can't allocate interface\n");
509 ret = -ENOMEM; 516 ret = -ENOMEM;
510 goto fail1;
511 }
512
513 if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
514 ret = -EBUSY;
515 goto fail2; 517 goto fail2;
516 } 518 }
517 519
@@ -582,11 +584,11 @@ fail5:
582fail4: 584fail4:
583 clk_put(i2c_imx->clk); 585 clk_put(i2c_imx->clk);
584fail3: 586fail3:
585 release_mem_region(i2c_imx->res->start, resource_size(res));
586fail2:
587 kfree(i2c_imx); 587 kfree(i2c_imx);
588fail1: 588fail2:
589 iounmap(base); 589 iounmap(base);
590fail1:
591 release_mem_region(res->start, resource_size(res));
590fail0: 592fail0:
591 if (pdata && pdata->exit) 593 if (pdata && pdata->exit)
592 pdata->exit(&pdev->dev); 594 pdata->exit(&pdev->dev);
@@ -618,8 +620,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
618 620
619 clk_put(i2c_imx->clk); 621 clk_put(i2c_imx->clk);
620 622
621 release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
622 iounmap(i2c_imx->base); 623 iounmap(i2c_imx->base);
624 release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
623 kfree(i2c_imx); 625 kfree(i2c_imx);
624 return 0; 626 return 0;
625} 627}
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 0ed68e2ccd22..f7346a9bd95f 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -75,7 +75,7 @@ static int pca_isa_waitforcompletion(void *pd)
75 unsigned long timeout; 75 unsigned long timeout;
76 76
77 if (irq > -1) { 77 if (irq > -1) {
78 ret = wait_event_interruptible_timeout(pca_wait, 78 ret = wait_event_timeout(pca_wait,
79 pca_isa_readbyte(pd, I2C_PCA_CON) 79 pca_isa_readbyte(pd, I2C_PCA_CON)
80 & I2C_PCA_CON_SI, pca_isa_ops.timeout); 80 & I2C_PCA_CON_SI, pca_isa_ops.timeout);
81 } else { 81 } else {
@@ -96,7 +96,7 @@ static void pca_isa_resetchip(void *pd)
96} 96}
97 97
98static irqreturn_t pca_handler(int this_irq, void *dev_id) { 98static irqreturn_t pca_handler(int this_irq, void *dev_id) {
99 wake_up_interruptible(&pca_wait); 99 wake_up(&pca_wait);
100 return IRQ_HANDLED; 100 return IRQ_HANDLED;
101} 101}
102 102
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index c4df9d411cd5..5b2213df5ed0 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -84,7 +84,7 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
84 unsigned long timeout; 84 unsigned long timeout;
85 85
86 if (i2c->irq) { 86 if (i2c->irq) {
87 ret = wait_event_interruptible_timeout(i2c->wait, 87 ret = wait_event_timeout(i2c->wait,
88 i2c->algo_data.read_byte(i2c, I2C_PCA_CON) 88 i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
89 & I2C_PCA_CON_SI, i2c->adap.timeout); 89 & I2C_PCA_CON_SI, i2c->adap.timeout);
90 } else { 90 } else {
@@ -122,7 +122,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
122 if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) 122 if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
123 return IRQ_NONE; 123 return IRQ_NONE;
124 124
125 wake_up_interruptible(&i2c->wait); 125 wake_up(&i2c->wait);
126 126
127 return IRQ_HANDLED; 127 return IRQ_HANDLED;
128} 128}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 1e245e9cad31..e56e4b6823ca 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -324,12 +324,12 @@ static int piix4_transaction(void)
324 else 324 else
325 msleep(1); 325 msleep(1);
326 326
327 while ((timeout++ < MAX_TIMEOUT) && 327 while ((++timeout < MAX_TIMEOUT) &&
328 ((temp = inb_p(SMBHSTSTS)) & 0x01)) 328 ((temp = inb_p(SMBHSTSTS)) & 0x01))
329 msleep(1); 329 msleep(1);
330 330
331 /* If the SMBus is still busy, we give up */ 331 /* If the SMBus is still busy, we give up */
332 if (timeout >= MAX_TIMEOUT) { 332 if (timeout == MAX_TIMEOUT) {
333 dev_err(&piix4_adapter.dev, "SMBus Timeout!\n"); 333 dev_err(&piix4_adapter.dev, "SMBus Timeout!\n");
334 result = -ETIMEDOUT; 334 result = -ETIMEDOUT;
335 } 335 }
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index b1c050ff311d..e29b6d5ba8ef 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/types.h>
16 17
17/* include interfaces to usb layer */ 18/* include interfaces to usb layer */
18#include <linux/usb.h> 19#include <linux/usb.h>
@@ -31,8 +32,8 @@
31#define CMD_I2C_IO_END (1<<1) 32#define CMD_I2C_IO_END (1<<1)
32 33
33/* i2c bit delay, default is 10us -> 100kHz */ 34/* i2c bit delay, default is 10us -> 100kHz */
34static int delay = 10; 35static unsigned short delay = 10;
35module_param(delay, int, 0); 36module_param(delay, ushort, 0);
36MODULE_PARM_DESC(delay, "bit delay in microseconds, " 37MODULE_PARM_DESC(delay, "bit delay in microseconds, "
37 "e.g. 10 for 100kHz (default is 100kHz)"); 38 "e.g. 10 for 100kHz (default is 100kHz)");
38 39
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
109 110
110static u32 usb_func(struct i2c_adapter *adapter) 111static u32 usb_func(struct i2c_adapter *adapter)
111{ 112{
112 u32 func; 113 __le32 func;
113 114
114 /* get functionality from adapter */ 115 /* get functionality from adapter */
115 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != 116 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
118 return 0; 119 return 0;
119 } 120 }
120 121
121 return func; 122 return le32_to_cpu(func);
122} 123}
123 124
124/* This is the actual algorithm we define */ 125/* This is the actual algorithm we define */
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
216 "i2c-tiny-usb at bus %03d device %03d", 217 "i2c-tiny-usb at bus %03d device %03d",
217 dev->usb_dev->bus->busnum, dev->usb_dev->devnum); 218 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
218 219
219 if (usb_write(&dev->adapter, CMD_SET_DELAY, 220 if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
220 cpu_to_le16(delay), 0, NULL, 0) != 0) {
221 dev_err(&dev->adapter.dev, 221 dev_err(&dev->adapter.dev,
222 "failure setting delay to %dus\n", delay); 222 "failure setting delay to %dus\n", delay);
223 retval = -EIO; 223 retval = -EIO;
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index e4b1543015af..a84a909e1234 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -165,10 +165,10 @@ static int vt596_transaction(u8 size)
165 do { 165 do {
166 msleep(1); 166 msleep(1);
167 temp = inb_p(SMBHSTSTS); 167 temp = inb_p(SMBHSTSTS);
168 } while ((temp & 0x01) && (timeout++ < MAX_TIMEOUT)); 168 } while ((temp & 0x01) && (++timeout < MAX_TIMEOUT));
169 169
170 /* If the SMBus is still busy, we give up */ 170 /* If the SMBus is still busy, we give up */
171 if (timeout >= MAX_TIMEOUT) { 171 if (timeout == MAX_TIMEOUT) {
172 result = -ETIMEDOUT; 172 result = -ETIMEDOUT;
173 dev_err(&vt596_adapter.dev, "SMBus timeout!\n"); 173 dev_err(&vt596_adapter.dev, "SMBus timeout!\n");
174 } 174 }
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0ac2f90ab840..10be7b5fbe97 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -248,7 +248,7 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
248 NULL 248 NULL
249}; 249};
250 250
251const static struct dev_pm_ops i2c_device_pm_ops = { 251static const struct dev_pm_ops i2c_device_pm_ops = {
252 .suspend = i2c_device_pm_suspend, 252 .suspend = i2c_device_pm_suspend,
253 .resume = i2c_device_pm_resume, 253 .resume = i2c_device_pm_resume,
254}; 254};
@@ -843,6 +843,9 @@ int i2c_del_adapter(struct i2c_adapter *adap)
843 adap->dev.parent); 843 adap->dev.parent);
844#endif 844#endif
845 845
846 /* device name is gone after device_unregister */
847 dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
848
846 /* clean up the sysfs representation */ 849 /* clean up the sysfs representation */
847 init_completion(&adap->dev_released); 850 init_completion(&adap->dev_released);
848 device_unregister(&adap->dev); 851 device_unregister(&adap->dev);
@@ -855,8 +858,6 @@ int i2c_del_adapter(struct i2c_adapter *adap)
855 idr_remove(&i2c_adapter_idr, adap->nr); 858 idr_remove(&i2c_adapter_idr, adap->nr);
856 mutex_unlock(&core_lock); 859 mutex_unlock(&core_lock);
857 860
858 dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
859
860 /* Clear the device structure in case this adapter is ever going to be 861 /* Clear the device structure in case this adapter is ever going to be
861 added again */ 862 added again */
862 memset(&adap->dev, 0, sizeof(adap->dev)); 863 memset(&adap->dev, 0, sizeof(adap->dev));
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index f102fcc7e52a..e02096cf7d95 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,8 +1,3 @@
1menu "IEEE 1394 (FireWire) support"
2 depends on PCI || BROKEN
3
4source "drivers/firewire/Kconfig"
5
6config IEEE1394 1config IEEE1394
7 tristate "Legacy alternative FireWire driver stack" 2 tristate "Legacy alternative FireWire driver stack"
8 depends on PCI || BROKEN 3 depends on PCI || BROKEN
@@ -16,8 +11,13 @@ config IEEE1394
16 is the core support only, you will also need to select a driver for 11 is the core support only, you will also need to select a driver for
17 your IEEE 1394 adapter. 12 your IEEE 1394 adapter.
18 13
19 To compile this driver as a module, say M here: the 14 To compile this driver as a module, say M here: the module will be
20 module will be called ieee1394. 15 called ieee1394.
16
17 NOTE:
18 ieee1394 is superseded by the newer firewire-core driver. See
19 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
20 further information on how to switch to the new FireWire drivers.
21 21
22config IEEE1394_OHCI1394 22config IEEE1394_OHCI1394
23 tristate "OHCI-1394 controllers" 23 tristate "OHCI-1394 controllers"
@@ -29,19 +29,23 @@ config IEEE1394_OHCI1394
29 use one of these chipsets. It should work with any OHCI-1394 29 use one of these chipsets. It should work with any OHCI-1394
30 compliant card, however. 30 compliant card, however.
31 31
32 To compile this driver as a module, say M here: the 32 To compile this driver as a module, say M here: the module will be
33 module will be called ohci1394. 33 called ohci1394.
34 34
35 NOTE: 35 NOTE:
36 ohci1394 is superseded by the newer firewire-ohci driver. See
37 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
38 further information on how to switch to the new FireWire drivers.
39
36 If you want to install firewire-ohci and ohci1394 together, you 40 If you want to install firewire-ohci and ohci1394 together, you
37 should configure them only as modules and blacklist the driver(s) 41 should configure them only as modules and blacklist the driver(s)
38 which you don't want to have auto-loaded. Add either 42 which you don't want to have auto-loaded. Add either
39 43
40 blacklist firewire-ohci
41 or
42 blacklist ohci1394 44 blacklist ohci1394
43 blacklist video1394 45 blacklist video1394
44 blacklist dv1394 46 blacklist dv1394
47 or
48 blacklist firewire-ohci
45 49
46 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf 50 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
47 depending on your distribution. 51 depending on your distribution.
@@ -58,8 +62,8 @@ config IEEE1394_PCILYNX
58 Instruments PCILynx chip. Note: this driver is written for revision 62 Instruments PCILynx chip. Note: this driver is written for revision
59 2 of this chip and may not work with revision 0. 63 2 of this chip and may not work with revision 0.
60 64
61 To compile this driver as a module, say M here: the 65 To compile this driver as a module, say M here: the module will be
62 module will be called pcilynx. 66 called pcilynx.
63 67
64 Only some old and now very rare PCI and CardBus cards and 68 Only some old and now very rare PCI and CardBus cards and
65 PowerMacs G3 B&W contain the PCILynx controller. Therefore 69 PowerMacs G3 B&W contain the PCILynx controller. Therefore
@@ -79,6 +83,14 @@ config IEEE1394_SBP2
79 You should also enable support for disks, CD-ROMs, etc. in the SCSI 83 You should also enable support for disks, CD-ROMs, etc. in the SCSI
80 configuration section. 84 configuration section.
81 85
86 To compile this driver as a module, say M here: the module will be
87 called sbp2.
88
89 NOTE:
90 sbp2 is superseded by the newer firewire-sbp2 driver. See
91 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
92 further information on how to switch to the new FireWire drivers.
93
82config IEEE1394_SBP2_PHYS_DMA 94config IEEE1394_SBP2_PHYS_DMA
83 bool "Enable replacement for physical DMA in SBP2" 95 bool "Enable replacement for physical DMA in SBP2"
84 depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL 96 depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL
@@ -111,6 +123,11 @@ config IEEE1394_ETH1394
111 123
112 The module is called eth1394 although it does not emulate Ethernet. 124 The module is called eth1394 although it does not emulate Ethernet.
113 125
126 NOTE:
127 eth1394 is superseded by the newer firewire-net driver. See
128 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
129 further information on how to switch to the new FireWire drivers.
130
114config IEEE1394_RAWIO 131config IEEE1394_RAWIO
115 tristate "raw1394 userspace interface" 132 tristate "raw1394 userspace interface"
116 depends on IEEE1394 133 depends on IEEE1394
@@ -123,6 +140,11 @@ config IEEE1394_RAWIO
123 To compile this driver as a module, say M here: the module will be 140 To compile this driver as a module, say M here: the module will be
124 called raw1394. 141 called raw1394.
125 142
143 NOTE:
144 raw1394 is superseded by the newer firewire-core driver. See
145 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
146 further information on how to switch to the new FireWire drivers.
147
126config IEEE1394_VIDEO1394 148config IEEE1394_VIDEO1394
127 tristate "video1394 userspace interface" 149 tristate "video1394 userspace interface"
128 depends on IEEE1394 && IEEE1394_OHCI1394 150 depends on IEEE1394 && IEEE1394_OHCI1394
@@ -136,13 +158,18 @@ config IEEE1394_VIDEO1394
136 To compile this driver as a module, say M here: the module will be 158 To compile this driver as a module, say M here: the module will be
137 called video1394. 159 called video1394.
138 160
161 NOTE:
162 video1394 is superseded by the newer firewire-core driver. See
163 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
164 further information on how to switch to the new FireWire drivers.
165
139config IEEE1394_DV1394 166config IEEE1394_DV1394
140 tristate "dv1394 userspace interface (deprecated)" 167 tristate "dv1394 userspace interface (deprecated)"
141 depends on IEEE1394 && IEEE1394_OHCI1394 168 depends on IEEE1394 && IEEE1394_OHCI1394
142 help 169 help
143 The dv1394 driver is unsupported and may be removed from Linux in a 170 The dv1394 driver is unsupported and may be removed from Linux in a
144 future release. Its functionality is now provided by raw1394 together 171 future release. Its functionality is now provided by either
145 with libraries such as libiec61883. 172 raw1394 or firewire-core together with libraries such as libiec61883.
146 173
147config IEEE1394_VERBOSEDEBUG 174config IEEE1394_VERBOSEDEBUG
148 bool "Excessive debugging output" 175 bool "Excessive debugging output"
@@ -153,5 +180,3 @@ config IEEE1394_VERBOSEDEBUG
153 will quickly result in large amounts of data sent to the system log. 180 will quickly result in large amounts of data sent to the system log.
154 181
155 Say Y if you really need the debugging output. Everyone else says N. 182 Say Y if you really need the debugging output. Everyone else says N.
156
157endmenu
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fbdd73106000..875e34e0b235 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2083,7 +2083,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
2083static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 2083static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2084 struct sockaddr *addr) 2084 struct sockaddr *addr)
2085{ 2085{
2086#if defined(CONFIG_IPv6) || defined(CONFIG_IPV6_MODULE) 2086#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2087 struct sockaddr_in6 *sin6; 2087 struct sockaddr_in6 *sin6;
2088 2088
2089 if (addr->sa_family != AF_INET6) 2089 if (addr->sa_family != AF_INET6)
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2115 if (ret) 2115 if (ret)
2116 goto err1; 2116 goto err1;
2117 2117
2118 if (cma_loopback_addr(addr)) { 2118 if (!cma_any_addr(addr)) {
2119 ret = cma_bind_loopback(id_priv);
2120 } else if (!cma_zero_addr(addr)) {
2121 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2119 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2122 if (ret) 2120 if (ret)
2123 goto err1; 2121 goto err1;
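
The rdma_bind_addr() hunk above folds the old loopback and zero-address special cases into a single test: any concrete address, loopback included, now appears to be resolved through rdma_translate_ip(), and only the wildcard (any) address skips translation. A condensed sketch of the resulting branch, assuming the surrounding error labels of the original function:

	if (!cma_any_addr(addr)) {
		/* concrete address (loopback included): map it to an RDMA device */
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;
	}
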
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index b3684060465e..100da8542bba 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -346,10 +346,8 @@ static int ipathfs_fill_super(struct super_block *sb, void *data,
346 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { 346 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
347 spin_unlock_irqrestore(&ipath_devs_lock, flags); 347 spin_unlock_irqrestore(&ipath_devs_lock, flags);
348 ret = create_device_files(sb, dd); 348 ret = create_device_files(sb, dd);
349 if (ret) { 349 if (ret)
350 deactivate_locked_super(sb);
351 goto bail; 350 goto bail;
352 }
353 spin_lock_irqsave(&ipath_devs_lock, flags); 351 spin_lock_irqsave(&ipath_devs_lock, flags);
354 } 352 }
355 353
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 989555cee883..2a97c964b9ef 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1752,7 +1752,7 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1752 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 1752 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
1753 1753
1754 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1754 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1755 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) { 1755 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1756 err = -ENOMEM; 1756 err = -ENOMEM;
1757 *bad_wr = wr; 1757 *bad_wr = wr;
1758 goto out; 1758 goto out;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index d42565258fb7..cf8085bcbd6d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -74,6 +74,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
74 struct mlx4_ib_dev *dev = to_mdev(pd->device); 74 struct mlx4_ib_dev *dev = to_mdev(pd->device);
75 struct mlx4_ib_srq *srq; 75 struct mlx4_ib_srq *srq;
76 struct mlx4_wqe_srq_next_seg *next; 76 struct mlx4_wqe_srq_next_seg *next;
77 struct mlx4_wqe_data_seg *scatter;
77 int desc_size; 78 int desc_size;
78 int buf_size; 79 int buf_size;
79 int err; 80 int err;
@@ -149,6 +150,11 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
149 next = get_wqe(srq, i); 150 next = get_wqe(srq, i);
150 next->next_wqe_index = 151 next->next_wqe_index =
151 cpu_to_be16((i + 1) & (srq->msrq.max - 1)); 152 cpu_to_be16((i + 1) & (srq->msrq.max - 1));
153
154 for (scatter = (void *) (next + 1);
155 (void *) scatter < (void *) next + desc_size;
156 ++scatter)
157 scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
152 } 158 }
153 159
154 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, 160 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
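
The srq.c hunk above walks the scatter entries that follow each "next" segment of a freshly created SRQ WQE and stamps them with MLX4_INVALID_LKEY, presumably so the HCA never interprets an unused, uninitialized entry as a valid memory key. The initialization loop, condensed (pointer arithmetic exactly as in the hunk):

	for (scatter = (void *) (next + 1);
	     (void *) scatter < (void *) next + desc_size;
	     ++scatter)
		scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
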
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index dee6706038aa..258c639571b5 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -59,7 +59,8 @@ static void evdev_pass_event(struct evdev_client *client,
59 client->head &= EVDEV_BUFFER_SIZE - 1; 59 client->head &= EVDEV_BUFFER_SIZE - 1;
60 spin_unlock(&client->buffer_lock); 60 spin_unlock(&client->buffer_lock);
61 61
62 kill_fasync(&client->fasync, SIGIO, POLL_IN); 62 if (event->type == EV_SYN)
63 kill_fasync(&client->fasync, SIGIO, POLL_IN);
63} 64}
64 65
65/* 66/*
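
The evdev change above defers the SIGIO notification until the driver emits an EV_SYN marker, so asynchronous readers are woken once per complete event packet rather than once per event. A minimal userspace reader (sketch only; assumes an existing, readable /dev/input/event0 node) that treats EV_SYN as the packet terminator:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_event ev;
		int fd = open("/dev/input/event0", O_RDONLY);

		if (fd < 0)
			return 1;

		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_SYN)
				printf("--- packet complete ---\n"); /* point where SIGIO is now sent */
			else
				printf("type %u code %u value %d\n",
				       ev.type, ev.code, ev.value);
		}

		close(fd);
		return 0;
	}
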
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index b483b2995fa9..f967008f332e 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -221,11 +221,27 @@ static int get_compatible_type(struct ff_device *ff, int effect_type)
221} 221}
222 222
223/* 223/*
224 * Only left/right direction should be used (under/over 0x8000) for
225 * forward/reverse motor direction (to keep calculation fast & simple).
226 */
227static u16 ml_calculate_direction(u16 direction, u16 force,
228 u16 new_direction, u16 new_force)
229{
230 if (!force)
231 return new_direction;
232 if (!new_force)
233 return direction;
234 return (((u32)(direction >> 1) * force +
235 (new_direction >> 1) * new_force) /
236 (force + new_force)) << 1;
237}
238
239/*
224 * Combine two effects and apply gain. 240 * Combine two effects and apply gain.
225 */ 241 */
226static void ml_combine_effects(struct ff_effect *effect, 242static void ml_combine_effects(struct ff_effect *effect,
227 struct ml_effect_state *state, 243 struct ml_effect_state *state,
228 unsigned int gain) 244 int gain)
229{ 245{
230 struct ff_effect *new = state->effect; 246 struct ff_effect *new = state->effect;
231 unsigned int strong, weak, i; 247 unsigned int strong, weak, i;
@@ -252,8 +268,21 @@ static void ml_combine_effects(struct ff_effect *effect,
252 break; 268 break;
253 269
254 case FF_RUMBLE: 270 case FF_RUMBLE:
255 strong = new->u.rumble.strong_magnitude * gain / 0xffff; 271 strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff;
256 weak = new->u.rumble.weak_magnitude * gain / 0xffff; 272 weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff;
273
274 if (effect->u.rumble.strong_magnitude + strong)
275 effect->direction = ml_calculate_direction(
276 effect->direction,
277 effect->u.rumble.strong_magnitude,
278 new->direction, strong);
279 else if (effect->u.rumble.weak_magnitude + weak)
280 effect->direction = ml_calculate_direction(
281 effect->direction,
282 effect->u.rumble.weak_magnitude,
283 new->direction, weak);
284 else
285 effect->direction = 0;
257 effect->u.rumble.strong_magnitude = 286 effect->u.rumble.strong_magnitude =
258 min(strong + effect->u.rumble.strong_magnitude, 287 min(strong + effect->u.rumble.strong_magnitude,
259 0xffffU); 288 0xffffU);
@@ -268,6 +297,13 @@ static void ml_combine_effects(struct ff_effect *effect,
268 /* here we also scale it 0x7fff => 0xffff */ 297 /* here we also scale it 0x7fff => 0xffff */
269 i = i * gain / 0x7fff; 298 i = i * gain / 0x7fff;
270 299
300 if (effect->u.rumble.strong_magnitude + i)
301 effect->direction = ml_calculate_direction(
302 effect->direction,
303 effect->u.rumble.strong_magnitude,
304 new->direction, i);
305 else
306 effect->direction = 0;
271 effect->u.rumble.strong_magnitude = 307 effect->u.rumble.strong_magnitude =
272 min(i + effect->u.rumble.strong_magnitude, 0xffffU); 308 min(i + effect->u.rumble.strong_magnitude, 0xffffU);
273 effect->u.rumble.weak_magnitude = 309 effect->u.rumble.weak_magnitude =
@@ -411,8 +447,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
411 msecs_to_jiffies(state->effect->replay.length); 447 msecs_to_jiffies(state->effect->replay.length);
412 state->adj_at = state->play_at; 448 state->adj_at = state->play_at;
413 449
414 ml_schedule_timer(ml);
415
416 } else { 450 } else {
417 debug("initiated stop"); 451 debug("initiated stop");
418 452
@@ -420,10 +454,10 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
420 __set_bit(FF_EFFECT_ABORTING, &state->flags); 454 __set_bit(FF_EFFECT_ABORTING, &state->flags);
421 else 455 else
422 __clear_bit(FF_EFFECT_STARTED, &state->flags); 456 __clear_bit(FF_EFFECT_STARTED, &state->flags);
423
424 ml_play_effects(ml);
425 } 457 }
426 458
459 ml_play_effects(ml);
460
427 return 0; 461 return 0;
428} 462}
429 463
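
ml_calculate_direction() above blends the directions of two rumble effects, weighting each by its force; halving the directions before the multiply keeps the intermediate product inside 32 bits. A standalone demo of the same arithmetic (sketch, sample values chosen arbitrarily): two effects of equal strength pointing at 0x0000 and 0x8000 combine to the midpoint 0x4000.

	#include <stdio.h>
	#include <stdint.h>

	/* Same arithmetic as ml_calculate_direction() in the hunk above. */
	static uint16_t blend_direction(uint16_t dir, uint16_t force,
					uint16_t new_dir, uint16_t new_force)
	{
		if (!force)
			return new_dir;
		if (!new_force)
			return dir;
		return (uint16_t)((((uint32_t)(dir >> 1) * force +
				    (uint32_t)(new_dir >> 1) * new_force) /
				   (force + new_force)) << 1);
	}

	int main(void)
	{
		/* equal forces, directions 0x0000 and 0x8000 */
		printf("0x%04x\n", blend_direction(0x0000, 0x4000, 0x8000, 0x4000)); /* 0x4000 */
		return 0;
	}
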
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ab060710688f..86cb2d2196ff 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,6 +24,7 @@
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/smp_lock.h> 26#include <linux/smp_lock.h>
27#include "input-compat.h"
27 28
28MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); 29MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
29MODULE_DESCRIPTION("Input core"); 30MODULE_DESCRIPTION("Input core");
@@ -45,6 +46,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = {
45 ABS_MT_TOOL_TYPE, 46 ABS_MT_TOOL_TYPE,
46 ABS_MT_BLOB_ID, 47 ABS_MT_BLOB_ID,
47 ABS_MT_TRACKING_ID, 48 ABS_MT_TRACKING_ID,
49 ABS_MT_PRESSURE,
48 0 50 0
49}; 51};
50static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; 52static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
@@ -764,6 +766,40 @@ static int input_attach_handler(struct input_dev *dev, struct input_handler *han
764 return error; 766 return error;
765} 767}
766 768
769#ifdef CONFIG_COMPAT
770
771static int input_bits_to_string(char *buf, int buf_size,
772 unsigned long bits, bool skip_empty)
773{
774 int len = 0;
775
776 if (INPUT_COMPAT_TEST) {
777 u32 dword = bits >> 32;
778 if (dword || !skip_empty)
779 len += snprintf(buf, buf_size, "%x ", dword);
780
781 dword = bits & 0xffffffffUL;
782 if (dword || !skip_empty || len)
783 len += snprintf(buf + len, max(buf_size - len, 0),
784 "%x", dword);
785 } else {
786 if (bits || !skip_empty)
787 len += snprintf(buf, buf_size, "%lx", bits);
788 }
789
790 return len;
791}
792
793#else /* !CONFIG_COMPAT */
794
795static int input_bits_to_string(char *buf, int buf_size,
796 unsigned long bits, bool skip_empty)
797{
798 return bits || !skip_empty ?
799 snprintf(buf, buf_size, "%lx", bits) : 0;
800}
801
802#endif
767 803
768#ifdef CONFIG_PROC_FS 804#ifdef CONFIG_PROC_FS
769 805
@@ -832,14 +868,25 @@ static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
832 unsigned long *bitmap, int max) 868 unsigned long *bitmap, int max)
833{ 869{
834 int i; 870 int i;
835 871 bool skip_empty = true;
836 for (i = BITS_TO_LONGS(max) - 1; i > 0; i--) 872 char buf[18];
837 if (bitmap[i])
838 break;
839 873
840 seq_printf(seq, "B: %s=", name); 874 seq_printf(seq, "B: %s=", name);
841 for (; i >= 0; i--) 875
842 seq_printf(seq, "%lx%s", bitmap[i], i > 0 ? " " : ""); 876 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
877 if (input_bits_to_string(buf, sizeof(buf),
878 bitmap[i], skip_empty)) {
879 skip_empty = false;
880 seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
881 }
882 }
883
884 /*
885 * If no output was produced print a single 0.
886 */
887 if (skip_empty)
888 seq_puts(seq, "0");
889
843 seq_putc(seq, '\n'); 890 seq_putc(seq, '\n');
844} 891}
845 892
@@ -1128,14 +1175,23 @@ static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1128{ 1175{
1129 int i; 1176 int i;
1130 int len = 0; 1177 int len = 0;
1178 bool skip_empty = true;
1179
1180 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1181 len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1182 bitmap[i], skip_empty);
1183 if (len) {
1184 skip_empty = false;
1185 if (i > 0)
1186 len += snprintf(buf + len, max(buf_size - len, 0), " ");
1187 }
1188 }
1131 1189
1132 for (i = BITS_TO_LONGS(max) - 1; i > 0; i--) 1190 /*
1133 if (bitmap[i]) 1191 * If no output was produced print a single 0.
1134 break; 1192 */
1135 1193 if (len == 0)
1136 for (; i >= 0; i--) 1194 len = snprintf(buf, buf_size, "%d", 0);
1137 len += snprintf(buf + len, max(buf_size - len, 0),
1138 "%lx%s", bitmap[i], i > 0 ? " " : "");
1139 1195
1140 if (add_cr) 1196 if (add_cr)
1141 len += snprintf(buf + len, max(buf_size - len, 0), "\n"); 1197 len += snprintf(buf + len, max(buf_size - len, 0), "\n");
@@ -1150,7 +1206,8 @@ static ssize_t input_dev_show_cap_##bm(struct device *dev, \
1150{ \ 1206{ \
1151 struct input_dev *input_dev = to_input_dev(dev); \ 1207 struct input_dev *input_dev = to_input_dev(dev); \
1152 int len = input_print_bitmap(buf, PAGE_SIZE, \ 1208 int len = input_print_bitmap(buf, PAGE_SIZE, \
1153 input_dev->bm##bit, ev##_MAX, 1); \ 1209 input_dev->bm##bit, ev##_MAX, \
1210 true); \
1154 return min_t(int, len, PAGE_SIZE); \ 1211 return min_t(int, len, PAGE_SIZE); \
1155} \ 1212} \
1156static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL) 1213static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
@@ -1214,7 +1271,7 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1214 1271
1215 len = input_print_bitmap(&env->buf[env->buflen - 1], 1272 len = input_print_bitmap(&env->buf[env->buflen - 1],
1216 sizeof(env->buf) - env->buflen, 1273 sizeof(env->buf) - env->buflen,
1217 bitmap, max, 0); 1274 bitmap, max, false);
1218 if (len >= (sizeof(env->buf) - env->buflen)) 1275 if (len >= (sizeof(env->buf) - env->buflen))
1219 return -ENOMEM; 1276 return -ENOMEM;
1220 1277
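
The new input_bits_to_string() splits each native long of a capability bitmap into two 32-bit halves when a 32-bit process reads from a 64-bit kernel (INPUT_COMPAT_TEST), so the hex layout matches what such userspace expects, and leading all-zero words are skipped. A userspace-style sketch of the same formatting rule, with INPUT_COMPAT_TEST replaced by a plain flag for illustration:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	/* Mirrors input_bits_to_string(): split into 32-bit halves in "compat" mode. */
	static int bits_to_string(char *buf, int size, uint64_t bits,
				  bool compat, bool skip_empty)
	{
		int len = 0;

		if (compat) {
			uint32_t hi = bits >> 32, lo = bits & 0xffffffffu;

			if (hi || !skip_empty)
				len += snprintf(buf, size, "%x ", hi);
			if (lo || !skip_empty || len)
				len += snprintf(buf + len,
						size - len > 0 ? size - len : 0,
						"%x", lo);
		} else if (bits || !skip_empty) {
			len += snprintf(buf, size, "%llx", (unsigned long long)bits);
		}
		return len;
	}

	int main(void)
	{
		char buf[32];

		bits_to_string(buf, sizeof(buf), 0x100000003ULL, true, true);
		printf("%s\n", buf);	/* "1 3" */
		bits_to_string(buf, sizeof(buf), 0x100000003ULL, false, true);
		printf("%s\n", buf);	/* "100000003" */
		return 0;
	}
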
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 67c207f5b1a1..45ac70eae0aa 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -277,7 +277,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
277 } 277 }
278 278
279#ifdef RESET_WORKS 279#ifdef RESET_WORKS
280 if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) || 280 if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) &&
281 (gf2k->id != (GB(31,2,0) | GB(27,3,2) | GB(24,3,5)))) { 281 (gf2k->id != (GB(31,2,0) | GB(27,3,2) | GB(24,3,5)))) {
282 err = -ENODEV; 282 err = -ENODEV;
283 goto fail2; 283 goto fail2;
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index f6c688cae334..b1edd778639c 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -210,7 +210,7 @@ static int iforce_open(struct input_dev *dev)
210 return 0; 210 return 0;
211} 211}
212 212
213static void iforce_release(struct input_dev *dev) 213static void iforce_close(struct input_dev *dev)
214{ 214{
215 struct iforce *iforce = input_get_drvdata(dev); 215 struct iforce *iforce = input_get_drvdata(dev);
216 int i; 216 int i;
@@ -228,30 +228,17 @@ static void iforce_release(struct input_dev *dev)
228 228
229 /* Disable force feedback playback */ 229 /* Disable force feedback playback */
230 iforce_send_packet(iforce, FF_CMD_ENABLE, "\001"); 230 iforce_send_packet(iforce, FF_CMD_ENABLE, "\001");
231 /* Wait for the command to complete */
232 wait_event_interruptible(iforce->wait,
233 !test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags));
231 } 234 }
232 235
233 switch (iforce->bus) { 236 switch (iforce->bus) {
234#ifdef CONFIG_JOYSTICK_IFORCE_USB 237#ifdef CONFIG_JOYSTICK_IFORCE_USB
235 case IFORCE_USB:
236 usb_kill_urb(iforce->irq);
237
238 /* The device was unplugged before the file
239 * was released */
240 if (iforce->usbdev == NULL) {
241 iforce_delete_device(iforce);
242 kfree(iforce);
243 }
244 break;
245#endif
246 }
247}
248
249void iforce_delete_device(struct iforce *iforce)
250{
251 switch (iforce->bus) {
252#ifdef CONFIG_JOYSTICK_IFORCE_USB
253 case IFORCE_USB: 238 case IFORCE_USB:
254 iforce_usb_delete(iforce); 239 usb_kill_urb(iforce->irq);
240 usb_kill_urb(iforce->out);
241 usb_kill_urb(iforce->ctrl);
255 break; 242 break;
256#endif 243#endif
257#ifdef CONFIG_JOYSTICK_IFORCE_232 244#ifdef CONFIG_JOYSTICK_IFORCE_232
@@ -303,7 +290,7 @@ int iforce_init_device(struct iforce *iforce)
303 290
304 input_dev->name = "Unknown I-Force device"; 291 input_dev->name = "Unknown I-Force device";
305 input_dev->open = iforce_open; 292 input_dev->open = iforce_open;
306 input_dev->close = iforce_release; 293 input_dev->close = iforce_close;
307 294
308/* 295/*
309 * On-device memory allocation. 296 * On-device memory allocation.
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 9f289d8f52c6..b41303d3ec54 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -109,6 +109,7 @@ static void iforce_usb_out(struct urb *urb)
109 struct iforce *iforce = urb->context; 109 struct iforce *iforce = urb->context;
110 110
111 if (urb->status) { 111 if (urb->status) {
112 clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
112 dbg("urb->status %d, exiting", urb->status); 113 dbg("urb->status %d, exiting", urb->status);
113 return; 114 return;
114 } 115 }
@@ -186,33 +187,19 @@ fail:
186 return err; 187 return err;
187} 188}
188 189
189/* Called by iforce_delete() */
190void iforce_usb_delete(struct iforce* iforce)
191{
192 usb_kill_urb(iforce->irq);
193 usb_kill_urb(iforce->out);
194 usb_kill_urb(iforce->ctrl);
195
196 usb_free_urb(iforce->irq);
197 usb_free_urb(iforce->out);
198 usb_free_urb(iforce->ctrl);
199}
200
201static void iforce_usb_disconnect(struct usb_interface *intf) 190static void iforce_usb_disconnect(struct usb_interface *intf)
202{ 191{
203 struct iforce *iforce = usb_get_intfdata(intf); 192 struct iforce *iforce = usb_get_intfdata(intf);
204 int open = 0; /* FIXME! iforce->dev.handle->open; */
205 193
206 usb_set_intfdata(intf, NULL); 194 usb_set_intfdata(intf, NULL);
207 if (iforce) {
208 iforce->usbdev = NULL;
209 input_unregister_device(iforce->dev);
210 195
211 if (!open) { 196 input_unregister_device(iforce->dev);
212 iforce_delete_device(iforce); 197
213 kfree(iforce); 198 usb_free_urb(iforce->irq);
214 } 199 usb_free_urb(iforce->out);
215 } 200 usb_free_urb(iforce->ctrl);
201
202 kfree(iforce);
216} 203}
217 204
218static struct usb_device_id iforce_usb_ids [] = { 205static struct usb_device_id iforce_usb_ids [] = {
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index f2d91f4028ca..9f494b75848a 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -150,11 +150,9 @@ void iforce_serial_xmit(struct iforce *iforce);
150 150
151/* iforce-usb.c */ 151/* iforce-usb.c */
152void iforce_usb_xmit(struct iforce *iforce); 152void iforce_usb_xmit(struct iforce *iforce);
153void iforce_usb_delete(struct iforce *iforce);
154 153
155/* iforce-main.c */ 154/* iforce-main.c */
156int iforce_init_device(struct iforce *iforce); 155int iforce_init_device(struct iforce *iforce);
157void iforce_delete_device(struct iforce *iforce);
158 156
159/* iforce-packets.c */ 157/* iforce-packets.c */
160int iforce_control_playback(struct iforce*, u16 id, unsigned int); 158int iforce_control_playback(struct iforce*, u16 id, unsigned int);
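
Taken together, the iforce hunks make close() synchronous with the transmitter: iforce_close() now sleeps until IFORCE_XMIT_RUNNING clears before the URBs are killed, and the error path of iforce_usb_out() clears that bit so a failed transfer cannot leave close() waiting forever. The waiter/waker pair, as a sketch (the wake_up() side lives in the URB completion code and is assumed here rather than shown in the hunks):

	/* close() path: wait for the last packet to finish before killing URBs */
	wait_event_interruptible(iforce->wait,
			!test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags));

	/* transmit-done (or error) path: release the bit and wake the waiter */
	clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
	wake_up(&iforce->wait);
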
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 482cb1204e43..8a28fb7846dc 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -446,7 +446,7 @@ static void xpad_irq_in(struct urb *urb)
446 } 446 }
447 447
448exit: 448exit:
449 retval = usb_submit_urb (urb, GFP_ATOMIC); 449 retval = usb_submit_urb(urb, GFP_ATOMIC);
450 if (retval) 450 if (retval)
451 err ("%s - usb_submit_urb failed with result %d", 451 err ("%s - usb_submit_urb failed with result %d",
452 __func__, retval); 452 __func__, retval);
@@ -571,7 +571,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data,
571 xpad->odata[6] = 0x00; 571 xpad->odata[6] = 0x00;
572 xpad->odata[7] = 0x00; 572 xpad->odata[7] = 0x00;
573 xpad->irq_out->transfer_buffer_length = 8; 573 xpad->irq_out->transfer_buffer_length = 8;
574 usb_submit_urb(xpad->irq_out, GFP_KERNEL); 574 usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
575 } 575 }
576 576
577 return 0; 577 return 0;
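
The xpad change swaps GFP_KERNEL for GFP_ATOMIC because the force-feedback play_effect callback is typically invoked by the input core with a spinlock held, i.e. in atomic context where a sleeping allocation is not allowed. The distinction in one place (sketch):

	ret = usb_submit_urb(urb, GFP_KERNEL);	/* may sleep: process context only */
	ret = usb_submit_urb(urb, GFP_ATOMIC);	/* never sleeps: safe under spinlocks,
						 * in timers and in IRQ handlers */
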
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index a3573570c52f..7b4056292eaf 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -134,7 +134,8 @@ static const unsigned short atkbd_unxlate_table[128] = {
134#define ATKBD_CMD_GETID 0x02f2 134#define ATKBD_CMD_GETID 0x02f2
135#define ATKBD_CMD_SETREP 0x10f3 135#define ATKBD_CMD_SETREP 0x10f3
136#define ATKBD_CMD_ENABLE 0x00f4 136#define ATKBD_CMD_ENABLE 0x00f4
137#define ATKBD_CMD_RESET_DIS 0x00f5 137#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */
138#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */
138#define ATKBD_CMD_SETALL_MBR 0x00fa 139#define ATKBD_CMD_SETALL_MBR 0x00fa
139#define ATKBD_CMD_RESET_BAT 0x02ff 140#define ATKBD_CMD_RESET_BAT 0x02ff
140#define ATKBD_CMD_RESEND 0x00fe 141#define ATKBD_CMD_RESEND 0x00fe
@@ -224,8 +225,10 @@ struct atkbd {
224 225
225 struct delayed_work event_work; 226 struct delayed_work event_work;
226 unsigned long event_jiffies; 227 unsigned long event_jiffies;
227 struct mutex event_mutex;
228 unsigned long event_mask; 228 unsigned long event_mask;
229
230 /* Serializes reconnect(), attr->set() and event work */
231 struct mutex mutex;
229}; 232};
230 233
231/* 234/*
@@ -576,7 +579,7 @@ static void atkbd_event_work(struct work_struct *work)
576{ 579{
577 struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work); 580 struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work);
578 581
579 mutex_lock(&atkbd->event_mutex); 582 mutex_lock(&atkbd->mutex);
580 583
581 if (!atkbd->enabled) { 584 if (!atkbd->enabled) {
582 /* 585 /*
@@ -595,7 +598,7 @@ static void atkbd_event_work(struct work_struct *work)
595 atkbd_set_repeat_rate(atkbd); 598 atkbd_set_repeat_rate(atkbd);
596 } 599 }
597 600
598 mutex_unlock(&atkbd->event_mutex); 601 mutex_unlock(&atkbd->mutex);
599} 602}
600 603
601/* 604/*
@@ -611,7 +614,7 @@ static void atkbd_schedule_event_work(struct atkbd *atkbd, int event_bit)
611 614
612 atkbd->event_jiffies = jiffies; 615 atkbd->event_jiffies = jiffies;
613 set_bit(event_bit, &atkbd->event_mask); 616 set_bit(event_bit, &atkbd->event_mask);
614 wmb(); 617 mb();
615 schedule_delayed_work(&atkbd->event_work, delay); 618 schedule_delayed_work(&atkbd->event_work, delay);
616} 619}
617 620
@@ -836,7 +839,7 @@ static void atkbd_cleanup(struct serio *serio)
836 struct atkbd *atkbd = serio_get_drvdata(serio); 839 struct atkbd *atkbd = serio_get_drvdata(serio);
837 840
838 atkbd_disable(atkbd); 841 atkbd_disable(atkbd);
839 ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_BAT); 842 ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_DEF);
840} 843}
841 844
842 845
@@ -848,13 +851,20 @@ static void atkbd_disconnect(struct serio *serio)
848{ 851{
849 struct atkbd *atkbd = serio_get_drvdata(serio); 852 struct atkbd *atkbd = serio_get_drvdata(serio);
850 853
854 sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
855
851 atkbd_disable(atkbd); 856 atkbd_disable(atkbd);
852 857
853 /* make sure we don't have a command in flight */ 858 input_unregister_device(atkbd->dev);
859
860 /*
861 * Make sure we don't have a command in flight.
862 * Note that since atkbd->enabled is false event work will keep
863 * rescheduling itself until it gets canceled and will not try
864 * accessing freed input device or serio port.
865 */
854 cancel_delayed_work_sync(&atkbd->event_work); 866 cancel_delayed_work_sync(&atkbd->event_work);
855 867
856 sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
857 input_unregister_device(atkbd->dev);
858 serio_close(serio); 868 serio_close(serio);
859 serio_set_drvdata(serio, NULL); 869 serio_set_drvdata(serio, NULL);
860 kfree(atkbd); 870 kfree(atkbd);
@@ -1086,7 +1096,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
1086 atkbd->dev = dev; 1096 atkbd->dev = dev;
1087 ps2_init(&atkbd->ps2dev, serio); 1097 ps2_init(&atkbd->ps2dev, serio);
1088 INIT_DELAYED_WORK(&atkbd->event_work, atkbd_event_work); 1098 INIT_DELAYED_WORK(&atkbd->event_work, atkbd_event_work);
1089 mutex_init(&atkbd->event_mutex); 1099 mutex_init(&atkbd->mutex);
1090 1100
1091 switch (serio->id.type) { 1101 switch (serio->id.type) {
1092 1102
@@ -1159,19 +1169,23 @@ static int atkbd_reconnect(struct serio *serio)
1159{ 1169{
1160 struct atkbd *atkbd = serio_get_drvdata(serio); 1170 struct atkbd *atkbd = serio_get_drvdata(serio);
1161 struct serio_driver *drv = serio->drv; 1171 struct serio_driver *drv = serio->drv;
1172 int retval = -1;
1162 1173
1163 if (!atkbd || !drv) { 1174 if (!atkbd || !drv) {
1164 printk(KERN_DEBUG "atkbd: reconnect request, but serio is disconnected, ignoring...\n"); 1175 printk(KERN_DEBUG "atkbd: reconnect request, but serio is disconnected, ignoring...\n");
1165 return -1; 1176 return -1;
1166 } 1177 }
1167 1178
1179 mutex_lock(&atkbd->mutex);
1180
1168 atkbd_disable(atkbd); 1181 atkbd_disable(atkbd);
1169 1182
1170 if (atkbd->write) { 1183 if (atkbd->write) {
1171 if (atkbd_probe(atkbd)) 1184 if (atkbd_probe(atkbd))
1172 return -1; 1185 goto out;
1186
1173 if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra)) 1187 if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
1174 return -1; 1188 goto out;
1175 1189
1176 atkbd_activate(atkbd); 1190 atkbd_activate(atkbd);
1177 1191
@@ -1189,8 +1203,11 @@ static int atkbd_reconnect(struct serio *serio)
1189 } 1203 }
1190 1204
1191 atkbd_enable(atkbd); 1205 atkbd_enable(atkbd);
1206 retval = 0;
1192 1207
1193 return 0; 1208 out:
1209 mutex_unlock(&atkbd->mutex);
1210 return retval;
1194} 1211}
1195 1212
1196static struct serio_device_id atkbd_serio_ids[] = { 1213static struct serio_device_id atkbd_serio_ids[] = {
@@ -1234,47 +1251,28 @@ static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
1234 ssize_t (*handler)(struct atkbd *, char *)) 1251 ssize_t (*handler)(struct atkbd *, char *))
1235{ 1252{
1236 struct serio *serio = to_serio_port(dev); 1253 struct serio *serio = to_serio_port(dev);
1237 int retval; 1254 struct atkbd *atkbd = serio_get_drvdata(serio);
1238
1239 retval = serio_pin_driver(serio);
1240 if (retval)
1241 return retval;
1242
1243 if (serio->drv != &atkbd_drv) {
1244 retval = -ENODEV;
1245 goto out;
1246 }
1247
1248 retval = handler((struct atkbd *)serio_get_drvdata(serio), buf);
1249 1255
1250out: 1256 return handler(atkbd, buf);
1251 serio_unpin_driver(serio);
1252 return retval;
1253} 1257}
1254 1258
1255static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, 1259static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
1256 ssize_t (*handler)(struct atkbd *, const char *, size_t)) 1260 ssize_t (*handler)(struct atkbd *, const char *, size_t))
1257{ 1261{
1258 struct serio *serio = to_serio_port(dev); 1262 struct serio *serio = to_serio_port(dev);
1259 struct atkbd *atkbd; 1263 struct atkbd *atkbd = serio_get_drvdata(serio);
1260 int retval; 1264 int retval;
1261 1265
1262 retval = serio_pin_driver(serio); 1266 retval = mutex_lock_interruptible(&atkbd->mutex);
1263 if (retval) 1267 if (retval)
1264 return retval; 1268 return retval;
1265 1269
1266 if (serio->drv != &atkbd_drv) {
1267 retval = -ENODEV;
1268 goto out;
1269 }
1270
1271 atkbd = serio_get_drvdata(serio);
1272 atkbd_disable(atkbd); 1270 atkbd_disable(atkbd);
1273 retval = handler(atkbd, buf, count); 1271 retval = handler(atkbd, buf, count);
1274 atkbd_enable(atkbd); 1272 atkbd_enable(atkbd);
1275 1273
1276out: 1274 mutex_unlock(&atkbd->mutex);
1277 serio_unpin_driver(serio); 1275
1278 return retval; 1276 return retval;
1279} 1277}
1280 1278
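
The atkbd rework replaces the serio_pin_driver() dance with a single per-device mutex that serializes reconnect(), the sysfs store handlers and the delayed event work. The sysfs store path takes it interruptibly so a blocked writer can still be signalled, while reconnect() and the event work take it unconditionally; a sketch of the two lock sites (names as in the hunks, bodies elided):

	/* sysfs store: bail out if interrupted while waiting for the lock */
	retval = mutex_lock_interruptible(&atkbd->mutex);
	if (retval)
		return retval;
	/* ... talk to the keyboard ... */
	mutex_unlock(&atkbd->mutex);

	/* reconnect() and event work: plain lock, cannot be interrupted */
	mutex_lock(&atkbd->mutex);
	/* ... re-probe, apply LED / repeat-rate updates ... */
	mutex_unlock(&atkbd->mutex);
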
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index 6e52d855f637..d410d7a52f1d 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -174,6 +174,14 @@ static int __init davinci_ks_probe(struct platform_device *pdev)
174 struct davinci_ks_platform_data *pdata = pdev->dev.platform_data; 174 struct davinci_ks_platform_data *pdata = pdev->dev.platform_data;
175 int error, i; 175 int error, i;
176 176
177 if (pdata->device_enable) {
178 error = pdata->device_enable(dev);
179 if (error < 0) {
180 dev_dbg(dev, "device enable function failed\n");
181 return error;
182 }
183 }
184
177 if (!pdata->keymap) { 185 if (!pdata->keymap) {
178 dev_dbg(dev, "no keymap from pdata\n"); 186 dev_dbg(dev, "no keymap from pdata\n");
179 return -EINVAL; 187 return -EINVAL;
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index 9caed30f3bbb..b1ab29861e1c 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -192,11 +192,18 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd)
192static irqreturn_t locomokbd_interrupt(int irq, void *dev_id) 192static irqreturn_t locomokbd_interrupt(int irq, void *dev_id)
193{ 193{
194 struct locomokbd *locomokbd = dev_id; 194 struct locomokbd *locomokbd = dev_id;
195 u16 r;
196
197 r = locomo_readl(locomokbd->base + LOCOMO_KIC);
198 if ((r & 0x0001) == 0)
199 return IRQ_HANDLED;
200
201 locomo_writel(r & ~0x0100, locomokbd->base + LOCOMO_KIC); /* Ack */
202
195 /** wait chattering delay **/ 203 /** wait chattering delay **/
196 udelay(100); 204 udelay(100);
197 205
198 locomokbd_scankeyboard(locomokbd); 206 locomokbd_scankeyboard(locomokbd);
199
200 return IRQ_HANDLED; 207 return IRQ_HANDLED;
201} 208}
202 209
@@ -210,6 +217,25 @@ static void locomokbd_timer_callback(unsigned long data)
210 locomokbd_scankeyboard(locomokbd); 217 locomokbd_scankeyboard(locomokbd);
211} 218}
212 219
220static int locomokbd_open(struct input_dev *dev)
221{
222 struct locomokbd *locomokbd = input_get_drvdata(dev);
223 u16 r;
224
225 r = locomo_readl(locomokbd->base + LOCOMO_KIC) | 0x0010;
226 locomo_writel(r, locomokbd->base + LOCOMO_KIC);
227 return 0;
228}
229
230static void locomokbd_close(struct input_dev *dev)
231{
232 struct locomokbd *locomokbd = input_get_drvdata(dev);
233 u16 r;
234
235 r = locomo_readl(locomokbd->base + LOCOMO_KIC) & ~0x0010;
236 locomo_writel(r, locomokbd->base + LOCOMO_KIC);
237}
238
213static int __devinit locomokbd_probe(struct locomo_dev *dev) 239static int __devinit locomokbd_probe(struct locomo_dev *dev)
214{ 240{
215 struct locomokbd *locomokbd; 241 struct locomokbd *locomokbd;
@@ -253,6 +279,8 @@ static int __devinit locomokbd_probe(struct locomo_dev *dev)
253 input_dev->id.vendor = 0x0001; 279 input_dev->id.vendor = 0x0001;
254 input_dev->id.product = 0x0001; 280 input_dev->id.product = 0x0001;
255 input_dev->id.version = 0x0100; 281 input_dev->id.version = 0x0100;
282 input_dev->open = locomokbd_open;
283 input_dev->close = locomokbd_close;
256 input_dev->dev.parent = &dev->dev; 284 input_dev->dev.parent = &dev->dev;
257 285
258 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | 286 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
@@ -261,6 +289,8 @@ static int __devinit locomokbd_probe(struct locomo_dev *dev)
261 input_dev->keycodesize = sizeof(locomokbd_keycode[0]); 289 input_dev->keycodesize = sizeof(locomokbd_keycode[0]);
262 input_dev->keycodemax = ARRAY_SIZE(locomokbd_keycode); 290 input_dev->keycodemax = ARRAY_SIZE(locomokbd_keycode);
263 291
292 input_set_drvdata(input_dev, locomokbd);
293
264 memcpy(locomokbd->keycode, locomokbd_keycode, sizeof(locomokbd->keycode)); 294 memcpy(locomokbd->keycode, locomokbd_keycode, sizeof(locomokbd->keycode));
265 for (i = 0; i < LOCOMOKBD_NUMKEYS; i++) 295 for (i = 0; i < LOCOMOKBD_NUMKEYS; i++)
266 set_bit(locomokbd->keycode[i], input_dev->keybit); 296 set_bit(locomokbd->keycode[i], input_dev->keybit);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 34f4a29d4973..d3c8b61a941d 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -29,11 +29,13 @@ struct matrix_keypad {
29 unsigned short *keycodes; 29 unsigned short *keycodes;
30 unsigned int row_shift; 30 unsigned int row_shift;
31 31
32 DECLARE_BITMAP(disabled_gpios, MATRIX_MAX_ROWS);
33
32 uint32_t last_key_state[MATRIX_MAX_COLS]; 34 uint32_t last_key_state[MATRIX_MAX_COLS];
33 struct delayed_work work; 35 struct delayed_work work;
36 spinlock_t lock;
34 bool scan_pending; 37 bool scan_pending;
35 bool stopped; 38 bool stopped;
36 spinlock_t lock;
37}; 39};
38 40
39/* 41/*
@@ -222,9 +224,16 @@ static int matrix_keypad_suspend(struct device *dev)
222 224
223 matrix_keypad_stop(keypad->input_dev); 225 matrix_keypad_stop(keypad->input_dev);
224 226
225 if (device_may_wakeup(&pdev->dev)) 227 if (device_may_wakeup(&pdev->dev)) {
226 for (i = 0; i < pdata->num_row_gpios; i++) 228 for (i = 0; i < pdata->num_row_gpios; i++) {
227 enable_irq_wake(gpio_to_irq(pdata->row_gpios[i])); 229 if (!test_bit(i, keypad->disabled_gpios)) {
230 unsigned int gpio = pdata->row_gpios[i];
231
232 if (enable_irq_wake(gpio_to_irq(gpio)) == 0)
233 __set_bit(i, keypad->disabled_gpios);
234 }
235 }
236 }
228 237
229 return 0; 238 return 0;
230} 239}
@@ -236,9 +245,15 @@ static int matrix_keypad_resume(struct device *dev)
236 const struct matrix_keypad_platform_data *pdata = keypad->pdata; 245 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
237 int i; 246 int i;
238 247
239 if (device_may_wakeup(&pdev->dev)) 248 if (device_may_wakeup(&pdev->dev)) {
240 for (i = 0; i < pdata->num_row_gpios; i++) 249 for (i = 0; i < pdata->num_row_gpios; i++) {
241 disable_irq_wake(gpio_to_irq(pdata->row_gpios[i])); 250 if (test_and_clear_bit(i, keypad->disabled_gpios)) {
251 unsigned int gpio = pdata->row_gpios[i];
252
253 disable_irq_wake(gpio_to_irq(gpio));
254 }
255 }
256 }
242 257
243 matrix_keypad_start(keypad->input_dev); 258 matrix_keypad_start(keypad->input_dev);
244 259
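
The suspend/resume hunks above record which row-GPIO interrupts were actually armed as wakeup sources, so resume only disarms those and a failed enable_irq_wake() never leads to an unbalanced disable_irq_wake(). The pairing, condensed from the hunks:

	/* suspend: remember each IRQ we managed to arm */
	if (enable_irq_wake(gpio_to_irq(gpio)) == 0)
		__set_bit(i, keypad->disabled_gpios);

	/* resume: disarm only what suspend armed */
	if (test_and_clear_bit(i, keypad->disabled_gpios))
		disable_irq_wake(gpio_to_irq(gpio));
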
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index eeaa7acb9cfc..21d6184efa96 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -253,14 +253,6 @@ static irqreturn_t do_kp_irq(int irq, void *_kp)
253 u8 reg; 253 u8 reg;
254 int ret; 254 int ret;
255 255
256#ifdef CONFIG_LOCKDEP
257 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
258 * we don't want and can't tolerate. Although it might be
259 * friendlier not to borrow this thread context...
260 */
261 local_irq_enable();
262#endif
263
264 /* Read & Clear TWL4030 pending interrupt */ 256 /* Read & Clear TWL4030 pending interrupt */
265 ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1); 257 ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1);
266 258
@@ -403,7 +395,8 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
403 * 395 *
404 * NOTE: we assume this host is wired to TWL4040 INT1, not INT2 ... 396 * NOTE: we assume this host is wired to TWL4040 INT1, not INT2 ...
405 */ 397 */
406 error = request_irq(kp->irq, do_kp_irq, 0, pdev->name, kp); 398 error = request_threaded_irq(kp->irq, NULL, do_kp_irq,
399 0, pdev->name, kp);
407 if (error) { 400 if (error) {
408 dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n", 401 dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n",
409 kp->irq); 402 kp->irq);
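
This hunk (like the twl4030-pwrbutton change further down) drops the CONFIG_LOCKDEP local_irq_enable() workaround and switches to request_threaded_irq(), so the handler runs in a sleepable kernel thread where its blocking I2C transfers are legal. The call shape, taken from the hunk (a NULL primary handler means the core's default hard-IRQ handler simply wakes the thread):

	error = request_threaded_irq(kp->irq,
				     NULL,		/* no hard-IRQ handler needed */
				     do_kp_irq,		/* runs in thread context, may sleep */
				     0, pdev->name, kp);
	if (error)
		dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n", kp->irq);
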
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index bdde5c889035..e9069b87fde2 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -39,18 +39,8 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
39 int err; 39 int err;
40 u8 value; 40 u8 value;
41 41
42#ifdef CONFIG_LOCKDEP
43 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
44 * we don't want and can't tolerate since this is a threaded
45 * IRQ and can sleep due to the i2c reads it has to issue.
46 * Although it might be friendlier not to borrow this thread
47 * context...
48 */
49 local_irq_enable();
50#endif
51
52 err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value, 42 err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
53 STS_HW_CONDITIONS); 43 STS_HW_CONDITIONS);
54 if (!err) { 44 if (!err) {
55 input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ); 45 input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
56 input_sync(pwr); 46 input_sync(pwr);
@@ -80,7 +70,7 @@ static int __devinit twl4030_pwrbutton_probe(struct platform_device *pdev)
80 pwr->phys = "twl4030_pwrbutton/input0"; 70 pwr->phys = "twl4030_pwrbutton/input0";
81 pwr->dev.parent = &pdev->dev; 71 pwr->dev.parent = &pdev->dev;
82 72
83 err = request_irq(irq, powerbutton_irq, 73 err = request_threaded_irq(irq, NULL, powerbutton_irq,
84 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 74 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
85 "twl4030_pwrbutton", pwr); 75 "twl4030_pwrbutton", pwr);
86 if (err < 0) { 76 if (err < 0) {
diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c
index 33309fe44e20..c8f5a9a3fa14 100644
--- a/drivers/input/misc/winbond-cir.c
+++ b/drivers/input/misc/winbond-cir.c
@@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data)
768 return; 768 return;
769 } 769 }
770 770
771 dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X " 771 dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
772 "toggle %u mode %u scan 0x%08X\n", 772 "toggle %u mode %u scan 0x%08X\n",
773 address, 773 address,
774 command, 774 command,
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 38da6ab04384..c0afb71a3a6d 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -1328,7 +1328,7 @@ static struct platform_driver wistron_driver = {
1328 .driver = { 1328 .driver = {
1329 .name = "wistron-bios", 1329 .name = "wistron-bios",
1330 .owner = THIS_MODULE, 1330 .owner = THIS_MODULE,
1331#if CONFIG_PM 1331#ifdef CONFIG_PM
1332 .pm = &wistron_pm_ops, 1332 .pm = &wistron_pm_ops,
1333#endif 1333#endif
1334 }, 1334 },
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 3feeb3af8abd..c714ca2407f8 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -70,7 +70,7 @@ config MOUSE_PS2_SYNAPTICS
70config MOUSE_PS2_LIFEBOOK 70config MOUSE_PS2_LIFEBOOK
71 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED 71 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
72 default y 72 default y
73 depends on MOUSE_PS2 && X86 73 depends on MOUSE_PS2 && X86 && DMI
74 help 74 help
75 Say Y here if you have a Fujitsu B-series Lifebook PS/2 75 Say Y here if you have a Fujitsu B-series Lifebook PS/2
76 TouchScreen connected to your system. 76 TouchScreen connected to your system.
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 0d1d33468b43..4f8fe0886b2a 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -139,6 +139,7 @@ struct tp_finger {
139/* trackpad finger data size, empirically at least ten fingers */ 139/* trackpad finger data size, empirically at least ten fingers */
140#define SIZEOF_FINGER sizeof(struct tp_finger) 140#define SIZEOF_FINGER sizeof(struct tp_finger)
141#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER) 141#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER)
142#define MAX_FINGER_ORIENTATION 16384
142 143
143/* device-specific parameters */ 144/* device-specific parameters */
144struct bcm5974_param { 145struct bcm5974_param {
@@ -284,6 +285,26 @@ static void setup_events_to_report(struct input_dev *input_dev,
284 input_set_abs_params(input_dev, ABS_Y, 285 input_set_abs_params(input_dev, ABS_Y,
285 0, cfg->y.dim, cfg->y.fuzz, 0); 286 0, cfg->y.dim, cfg->y.fuzz, 0);
286 287
288 /* finger touch area */
289 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
290 cfg->w.devmin, cfg->w.devmax, 0, 0);
291 input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
292 cfg->w.devmin, cfg->w.devmax, 0, 0);
293 /* finger approach area */
294 input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR,
295 cfg->w.devmin, cfg->w.devmax, 0, 0);
296 input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR,
297 cfg->w.devmin, cfg->w.devmax, 0, 0);
298 /* finger orientation */
299 input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
300 -MAX_FINGER_ORIENTATION,
301 MAX_FINGER_ORIENTATION, 0, 0);
302 /* finger position */
303 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
304 cfg->x.devmin, cfg->x.devmax, 0, 0);
305 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
306 cfg->y.devmin, cfg->y.devmax, 0, 0);
307
287 __set_bit(EV_KEY, input_dev->evbit); 308 __set_bit(EV_KEY, input_dev->evbit);
288 __set_bit(BTN_TOUCH, input_dev->keybit); 309 __set_bit(BTN_TOUCH, input_dev->keybit);
289 __set_bit(BTN_TOOL_FINGER, input_dev->keybit); 310 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
@@ -310,13 +331,29 @@ static int report_bt_state(struct bcm5974 *dev, int size)
310 return 0; 331 return 0;
311} 332}
312 333
334static void report_finger_data(struct input_dev *input,
335 const struct bcm5974_config *cfg,
336 const struct tp_finger *f)
337{
338 input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
339 input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
340 input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
341 input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
342 input_report_abs(input, ABS_MT_ORIENTATION,
343 MAX_FINGER_ORIENTATION - raw2int(f->orientation));
344 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
345 input_report_abs(input, ABS_MT_POSITION_Y,
346 cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y));
347 input_mt_sync(input);
348}
349
313/* report trackpad data as logical trackpad state */ 350/* report trackpad data as logical trackpad state */
314static int report_tp_state(struct bcm5974 *dev, int size) 351static int report_tp_state(struct bcm5974 *dev, int size)
315{ 352{
316 const struct bcm5974_config *c = &dev->cfg; 353 const struct bcm5974_config *c = &dev->cfg;
317 const struct tp_finger *f; 354 const struct tp_finger *f;
318 struct input_dev *input = dev->input; 355 struct input_dev *input = dev->input;
319 int raw_p, raw_w, raw_x, raw_y, raw_n; 356 int raw_p, raw_w, raw_x, raw_y, raw_n, i;
320 int ptest, origin, ibt = 0, nmin = 0, nmax = 0; 357 int ptest, origin, ibt = 0, nmin = 0, nmax = 0;
321 int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0; 358 int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0;
322 359
@@ -329,6 +366,11 @@ static int report_tp_state(struct bcm5974 *dev, int size)
329 366
330 /* always track the first finger; when detached, start over */ 367 /* always track the first finger; when detached, start over */
331 if (raw_n) { 368 if (raw_n) {
369
370 /* report raw trackpad data */
371 for (i = 0; i < raw_n; i++)
372 report_finger_data(input, c, &f[i]);
373
332 raw_p = raw2int(f->force_major); 374 raw_p = raw2int(f->force_major);
333 raw_w = raw2int(f->size_major); 375 raw_w = raw2int(f->size_major);
334 raw_x = raw2int(f->abs_x); 376 raw_x = raw2int(f->abs_x);
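
report_finger_data() above emits one group of ABS_MT_* values per detected finger and closes each group with input_mt_sync(); the usual input_sync() at the end of the report (outside the hunks shown) then closes the whole frame. A minimal sketch of that multitouch reporting pattern (x[i] and y[i] are hypothetical per-finger coordinates, not fields from this driver):

	for (i = 0; i < fingers; i++) {
		input_report_abs(input, ABS_MT_POSITION_X, x[i]);
		input_report_abs(input, ABS_MT_POSITION_Y, y[i]);
		input_mt_sync(input);	/* SYN_MT_REPORT: end of this contact */
	}
	input_sync(input);		/* SYN_REPORT: end of the frame */
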
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index b146237266d8..90be30e93556 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -427,7 +427,6 @@ static void hgpk_recalib_work(struct work_struct *work)
427 427
428static int hgpk_register(struct psmouse *psmouse) 428static int hgpk_register(struct psmouse *psmouse)
429{ 429{
430 struct input_dev *dev = psmouse->dev;
431 int err; 430 int err;
432 431
433 /* register handlers */ 432 /* register handlers */
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 2e6bdfea0165..7c1d7d420ae3 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -44,7 +44,6 @@ static int lifebook_set_6byte_proto(const struct dmi_system_id *d)
44} 44}
45 45
46static const struct dmi_system_id __initconst lifebook_dmi_table[] = { 46static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
47#if defined(CONFIG_DMI) && defined(CONFIG_X86)
48 { 47 {
49 /* FLORA-ie 55mi */ 48 /* FLORA-ie 55mi */
50 .matches = { 49 .matches = {
@@ -54,6 +53,12 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
54 { 53 {
55 /* LifeBook B */ 54 /* LifeBook B */
56 .matches = { 55 .matches = {
56 DMI_MATCH(DMI_PRODUCT_NAME, "Lifebook B Series"),
57 },
58 },
59 {
60 /* LifeBook B */
61 .matches = {
57 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"), 62 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"),
58 }, 63 },
59 }, 64 },
@@ -118,7 +123,6 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
118 }, 123 },
119 }, 124 },
120 { } 125 { }
121#endif
122}; 126};
123 127
124void __init lifebook_module_init(void) 128void __init lifebook_module_init(void)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index fd0bc094616a..d8c0c8d6992c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -627,8 +627,15 @@ static int psmouse_extensions(struct psmouse *psmouse,
627 synaptics_hardware = true; 627 synaptics_hardware = true;
628 628
629 if (max_proto > PSMOUSE_IMEX) { 629 if (max_proto > PSMOUSE_IMEX) {
630 if (!set_properties || synaptics_init(psmouse) == 0) 630/*
631 * Try activating protocol, but check if support is enabled first, since
632 * we try detecting Synaptics even when protocol is disabled.
633 */
634 if (synaptics_supported() &&
635 (!set_properties || synaptics_init(psmouse) == 0)) {
631 return PSMOUSE_SYNAPTICS; 636 return PSMOUSE_SYNAPTICS;
637 }
638
632/* 639/*
633 * Some Synaptics touchpads can emulate extended protocols (like IMPS/2). 640 * Some Synaptics touchpads can emulate extended protocols (like IMPS/2).
634 * Unfortunately Logitech/Genius probes confuse some firmware versions so 641 * Unfortunately Logitech/Genius probes confuse some firmware versions so
@@ -683,19 +690,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
683 max_proto = PSMOUSE_IMEX; 690 max_proto = PSMOUSE_IMEX;
684 } 691 }
685 692
686/*
687 * Try Finger Sensing Pad
688 */
689 if (max_proto > PSMOUSE_IMEX) {
690 if (fsp_detect(psmouse, set_properties) == 0) {
691 if (!set_properties || fsp_init(psmouse) == 0)
692 return PSMOUSE_FSP;
693/*
694 * Init failed, try basic relative protocols
695 */
696 max_proto = PSMOUSE_IMEX;
697 }
698 }
699 693
700 if (max_proto > PSMOUSE_IMEX) { 694 if (max_proto > PSMOUSE_IMEX) {
701 if (genius_detect(psmouse, set_properties) == 0) 695 if (genius_detect(psmouse, set_properties) == 0)
@@ -712,6 +706,21 @@ static int psmouse_extensions(struct psmouse *psmouse,
712 } 706 }
713 707
714/* 708/*
709 * Try Finger Sensing Pad. We do it here because its probe upsets
710 * Trackpoint devices (causing TP_READ_ID command to time out).
711 */
712 if (max_proto > PSMOUSE_IMEX) {
713 if (fsp_detect(psmouse, set_properties) == 0) {
714 if (!set_properties || fsp_init(psmouse) == 0)
715 return PSMOUSE_FSP;
716/*
717 * Init failed, try basic relative protocols
718 */
719 max_proto = PSMOUSE_IMEX;
720 }
721 }
722
723/*
715 * Reset to defaults in case the device got confused by extended 724 * Reset to defaults in case the device got confused by extended
716 * protocol probes. Note that we follow up with full reset because 725 * protocol probes. Note that we follow up with full reset because
717 * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS. 726 * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS.
@@ -1132,12 +1141,22 @@ static void psmouse_cleanup(struct serio *serio)
1132 psmouse_deactivate(parent); 1141 psmouse_deactivate(parent);
1133 } 1142 }
1134 1143
1135 psmouse_deactivate(psmouse); 1144 psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
1145
1146 /*
1147 * Disable stream mode so cleanup routine can proceed undisturbed.
1148 */
1149 if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
1150 printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
1151 psmouse->ps2dev.serio->phys);
1136 1152
1137 if (psmouse->cleanup) 1153 if (psmouse->cleanup)
1138 psmouse->cleanup(psmouse); 1154 psmouse->cleanup(psmouse);
1139 1155
1140 psmouse_reset(psmouse); 1156/*
1157 * Reset the mouse to defaults (bare PS/2 protocol).
1158 */
1159 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
1141 1160
1142/* 1161/*
1143 * Some boxes, such as HP nx7400, get terribly confused if mouse 1162 * Some boxes, such as HP nx7400, get terribly confused if mouse
@@ -1447,24 +1466,10 @@ ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *de
1447 struct serio *serio = to_serio_port(dev); 1466 struct serio *serio = to_serio_port(dev);
1448 struct psmouse_attribute *attr = to_psmouse_attr(devattr); 1467 struct psmouse_attribute *attr = to_psmouse_attr(devattr);
1449 struct psmouse *psmouse; 1468 struct psmouse *psmouse;
1450 int retval;
1451
1452 retval = serio_pin_driver(serio);
1453 if (retval)
1454 return retval;
1455
1456 if (serio->drv != &psmouse_drv) {
1457 retval = -ENODEV;
1458 goto out;
1459 }
1460 1469
1461 psmouse = serio_get_drvdata(serio); 1470 psmouse = serio_get_drvdata(serio);
1462 1471
1463 retval = attr->show(psmouse, attr->data, buf); 1472 return attr->show(psmouse, attr->data, buf);
1464
1465out:
1466 serio_unpin_driver(serio);
1467 return retval;
1468} 1473}
1469 1474
1470ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr, 1475ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr,
@@ -1475,18 +1480,9 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
1475 struct psmouse *psmouse, *parent = NULL; 1480 struct psmouse *psmouse, *parent = NULL;
1476 int retval; 1481 int retval;
1477 1482
1478 retval = serio_pin_driver(serio);
1479 if (retval)
1480 return retval;
1481
1482 if (serio->drv != &psmouse_drv) {
1483 retval = -ENODEV;
1484 goto out_unpin;
1485 }
1486
1487 retval = mutex_lock_interruptible(&psmouse_mutex); 1483 retval = mutex_lock_interruptible(&psmouse_mutex);
1488 if (retval) 1484 if (retval)
1489 goto out_unpin; 1485 goto out;
1490 1486
1491 psmouse = serio_get_drvdata(serio); 1487 psmouse = serio_get_drvdata(serio);
1492 1488
@@ -1516,8 +1512,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
1516 1512
1517 out_unlock: 1513 out_unlock:
1518 mutex_unlock(&psmouse_mutex); 1514 mutex_unlock(&psmouse_mutex);
1519 out_unpin: 1515 out:
1520 serio_unpin_driver(serio);
1521 return retval; 1516 return retval;
1522} 1517}
1523 1518
@@ -1579,9 +1574,7 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
1579 } 1574 }
1580 1575
1581 mutex_unlock(&psmouse_mutex); 1576 mutex_unlock(&psmouse_mutex);
1582 serio_unpin_driver(serio);
1583 serio_unregister_child_port(serio); 1577 serio_unregister_child_port(serio);
1584 serio_pin_driver_uninterruptible(serio);
1585 mutex_lock(&psmouse_mutex); 1578 mutex_lock(&psmouse_mutex);
1586 1579
1587 if (serio->drv != &psmouse_drv) { 1580 if (serio->drv != &psmouse_drv) {
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 77b9fd0b3fbf..81a6b81cb2fe 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
2 * Finger Sensing Pad PS/2 mouse driver. 2 * Finger Sensing Pad PS/2 mouse driver.
3 * 3 *
4 * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd. 4 * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
5 * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation. 5 * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -658,9 +658,9 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
658 if (packet[3] & BIT(1)) 658 if (packet[3] & BIT(1))
659 button_status |= 0x0f; /* wheel up */ 659 button_status |= 0x0f; /* wheel up */
660 if (packet[3] & BIT(2)) 660 if (packet[3] & BIT(2))
661 button_status |= BIT(5);/* horizontal left */ 661 button_status |= BIT(4);/* horizontal left */
662 if (packet[3] & BIT(3)) 662 if (packet[3] & BIT(3))
663 button_status |= BIT(4);/* horizontal right */ 663 button_status |= BIT(5);/* horizontal right */
664 /* push back to packet queue */ 664 /* push back to packet queue */
665 if (button_status != 0) 665 if (button_status != 0)
666 packet[3] = button_status; 666 packet[3] = button_status;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 05689e732191..d3f5243fa093 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -743,6 +743,11 @@ int synaptics_init(struct psmouse *psmouse)
743 return -1; 743 return -1;
744} 744}
745 745
746bool synaptics_supported(void)
747{
748 return true;
749}
750
746#else /* CONFIG_MOUSE_PS2_SYNAPTICS */ 751#else /* CONFIG_MOUSE_PS2_SYNAPTICS */
747 752
748void __init synaptics_module_init(void) 753void __init synaptics_module_init(void)
@@ -754,5 +759,10 @@ int synaptics_init(struct psmouse *psmouse)
754 return -ENOSYS; 759 return -ENOSYS;
755} 760}
756 761
762bool synaptics_supported(void)
763{
764 return false;
765}
766
757#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */ 767#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
758 768
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 838e7f2c9b30..f0f40a331dc8 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -109,5 +109,6 @@ void synaptics_module_init(void);
109int synaptics_detect(struct psmouse *psmouse, bool set_properties); 109int synaptics_detect(struct psmouse *psmouse, bool set_properties);
110int synaptics_init(struct psmouse *psmouse); 110int synaptics_init(struct psmouse *psmouse);
111void synaptics_reset(struct psmouse *psmouse); 111void synaptics_reset(struct psmouse *psmouse);
112bool synaptics_supported(void);
112 113
113#endif /* _SYNAPTICS_H */ 114#endif /* _SYNAPTICS_H */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 64b688daf48a..2a5982e532f8 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -524,6 +524,13 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
524 */ 524 */
525static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { 525static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
526 { 526 {
527 /* Acer Aspire 5610 */
528 .matches = {
529 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
530 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
531 },
532 },
533 {
527 /* Acer Aspire 5630 */ 534 /* Acer Aspire 5630 */
528 .matches = { 535 .matches = {
529 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 536 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d84a36e545f6..b54aee7cd9e3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
1161 return 0; 1161 return 0;
1162} 1162}
1163 1163
1164static int i8042_pm_thaw(struct device *dev)
1165{
1166 i8042_interrupt(0, NULL);
1167
1168 return 0;
1169}
1170
1164static const struct dev_pm_ops i8042_pm_ops = { 1171static const struct dev_pm_ops i8042_pm_ops = {
1165 .suspend = i8042_pm_reset, 1172 .suspend = i8042_pm_reset,
1166 .resume = i8042_pm_restore, 1173 .resume = i8042_pm_restore,
1174 .thaw = i8042_pm_thaw,
1167 .poweroff = i8042_pm_reset, 1175 .poweroff = i8042_pm_reset,
1168 .restore = i8042_pm_restore, 1176 .restore = i8042_pm_restore,
1169}; 1177};
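
The new callback covers the hibernation "thaw" phase, when devices are brought back after the memory snapshot has been created (or hibernation is aborted): i8042_pm_thaw() simply runs the interrupt handler once so any byte the controller latched while interrupts were off is not left pending. From the hunk, the callback and its slot in dev_pm_ops:

	static int i8042_pm_thaw(struct device *dev)
	{
		i8042_interrupt(0, NULL);	/* poll once, drain pending data */
		return 0;
	}

	static const struct dev_pm_ops i8042_pm_ops = {
		.suspend  = i8042_pm_reset,
		.resume   = i8042_pm_restore,
		.thaw     = i8042_pm_thaw,
		.poweroff = i8042_pm_reset,
		.restore  = i8042_pm_restore,
	};
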
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 0236f0d5fd91..e0f30186d513 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -284,13 +284,7 @@ static void serio_handle_event(void)
284 284
285 mutex_lock(&serio_mutex); 285 mutex_lock(&serio_mutex);
286 286
287 /* 287 while ((event = serio_get_event())) {
288 * Note that we handle only one event here to give swsusp
289 * a chance to freeze kseriod thread. Serio events should
290 * be pretty rare so we are not concerned about taking
291 * performance hit.
292 */
293 if ((event = serio_get_event())) {
294 288
295 switch (event->type) { 289 switch (event->type) {
296 case SERIO_REGISTER_PORT: 290 case SERIO_REGISTER_PORT:
@@ -380,10 +374,9 @@ static struct serio *serio_get_pending_child(struct serio *parent)
380 374
381static int serio_thread(void *nothing) 375static int serio_thread(void *nothing)
382{ 376{
383 set_freezable();
384 do { 377 do {
385 serio_handle_event(); 378 serio_handle_event();
386 wait_event_freezable(serio_wait, 379 wait_event_interruptible(serio_wait,
387 kthread_should_stop() || !list_empty(&serio_event_list)); 380 kthread_should_stop() || !list_empty(&serio_event_list));
388 } while (!kthread_should_stop()); 381 } while (!kthread_should_stop());
389 382
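The serio.c hunk drops the freezer hooks and the one-event-per-wakeup behaviour: the thread now drains the whole event list before sleeping in wait_event_interruptible(). A rough sketch of that drain-then-sleep kthread pattern with invented names (it is not kseriod itself):

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_event {
	struct list_head node;
	int type;
};

static LIST_HEAD(event_list);
static DEFINE_SPINLOCK(event_lock);
static DECLARE_WAIT_QUEUE_HEAD(event_wait);

static struct my_event *get_event(void)
{
	struct my_event *ev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&event_lock, flags);
	if (!list_empty(&event_list)) {
		ev = list_first_entry(&event_list, struct my_event, node);
		list_del_init(&ev->node);
	}
	spin_unlock_irqrestore(&event_lock, flags);

	return ev;
}

static int event_thread(void *unused)
{
	struct my_event *ev;

	do {
		/* drain everything queued so far, then go back to sleep */
		while ((ev = get_event())) {
			/* dispatch on ev->type here */
			kfree(ev);
		}
		wait_event_interruptible(event_wait,
			kthread_should_stop() || !list_empty(&event_list));
	} while (!kthread_should_stop());

	return 0;
}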
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index c21e6d3a8844..794d070c6900 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -47,6 +47,7 @@
47#include <linux/workqueue.h> 47#include <linux/workqueue.h>
48#include <linux/spi/spi.h> 48#include <linux/spi/spi.h>
49#include <linux/i2c.h> 49#include <linux/i2c.h>
50#include <linux/gpio.h>
50 51
51#include <linux/spi/ad7879.h> 52#include <linux/spi/ad7879.h>
52 53
@@ -132,7 +133,9 @@ struct ad7879 {
132 struct input_dev *input; 133 struct input_dev *input;
133 struct work_struct work; 134 struct work_struct work;
134 struct timer_list timer; 135 struct timer_list timer;
135 136#ifdef CONFIG_GPIOLIB
137 struct gpio_chip gc;
138#endif
136 struct mutex mutex; 139 struct mutex mutex;
137 unsigned disabled:1; /* P: mutex */ 140 unsigned disabled:1; /* P: mutex */
138 141
@@ -150,11 +153,9 @@ struct ad7879 {
150 u8 median; 153 u8 median;
151 u16 x_plate_ohms; 154 u16 x_plate_ohms;
152 u16 pressure_max; 155 u16 pressure_max;
153 u16 gpio_init;
154 u16 cmd_crtl1; 156 u16 cmd_crtl1;
155 u16 cmd_crtl2; 157 u16 cmd_crtl2;
156 u16 cmd_crtl3; 158 u16 cmd_crtl3;
157 unsigned gpio:1;
158}; 159};
159 160
160static int ad7879_read(bus_device *, u8); 161static int ad7879_read(bus_device *, u8);
@@ -237,24 +238,6 @@ static irqreturn_t ad7879_irq(int irq, void *handle)
237 238
238static void ad7879_setup(struct ad7879 *ts) 239static void ad7879_setup(struct ad7879 *ts)
239{ 240{
240 ts->cmd_crtl3 = AD7879_YPLUS_BIT |
241 AD7879_XPLUS_BIT |
242 AD7879_Z2_BIT |
243 AD7879_Z1_BIT |
244 AD7879_TEMPMASK_BIT |
245 AD7879_AUXVBATMASK_BIT |
246 AD7879_GPIOALERTMASK_BIT;
247
248 ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR |
249 AD7879_AVG(ts->averaging) |
250 AD7879_MFS(ts->median) |
251 AD7879_FCD(ts->first_conversion_delay) |
252 ts->gpio_init;
253
254 ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 |
255 AD7879_ACQ(ts->acquisition_time) |
256 AD7879_TMR(ts->pen_down_acc_interval);
257
258 ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); 241 ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
259 ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3); 242 ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3);
260 ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1); 243 ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1);
@@ -324,48 +307,132 @@ static ssize_t ad7879_disable_store(struct device *dev,
324 307
325static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store); 308static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store);
326 309
327static ssize_t ad7879_gpio_show(struct device *dev, 310static struct attribute *ad7879_attributes[] = {
328 struct device_attribute *attr, char *buf) 311 &dev_attr_disable.attr,
312 NULL
313};
314
315static const struct attribute_group ad7879_attr_group = {
316 .attrs = ad7879_attributes,
317};
318
319#ifdef CONFIG_GPIOLIB
320static int ad7879_gpio_direction_input(struct gpio_chip *chip,
321 unsigned gpio)
329{ 322{
330 struct ad7879 *ts = dev_get_drvdata(dev); 323 struct ad7879 *ts = container_of(chip, struct ad7879, gc);
324 int err;
331 325
332 return sprintf(buf, "%u\n", ts->gpio); 326 mutex_lock(&ts->mutex);
327 ts->cmd_crtl2 |= AD7879_GPIO_EN | AD7879_GPIODIR | AD7879_GPIOPOL;
328 err = ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
329 mutex_unlock(&ts->mutex);
330
331 return err;
333} 332}
334 333
335static ssize_t ad7879_gpio_store(struct device *dev, 334static int ad7879_gpio_direction_output(struct gpio_chip *chip,
336 struct device_attribute *attr, 335 unsigned gpio, int level)
337 const char *buf, size_t count)
338{ 336{
339 struct ad7879 *ts = dev_get_drvdata(dev); 337 struct ad7879 *ts = container_of(chip, struct ad7879, gc);
340 unsigned long val; 338 int err;
341 int error;
342 339
343 error = strict_strtoul(buf, 10, &val); 340 mutex_lock(&ts->mutex);
344 if (error) 341 ts->cmd_crtl2 &= ~AD7879_GPIODIR;
345 return error; 342 ts->cmd_crtl2 |= AD7879_GPIO_EN | AD7879_GPIOPOL;
343 if (level)
344 ts->cmd_crtl2 |= AD7879_GPIO_DATA;
345 else
346 ts->cmd_crtl2 &= ~AD7879_GPIO_DATA;
347
348 err = ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
349 mutex_unlock(&ts->mutex);
350
351 return err;
352}
353
354static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
355{
356 struct ad7879 *ts = container_of(chip, struct ad7879, gc);
357 u16 val;
346 358
347 mutex_lock(&ts->mutex); 359 mutex_lock(&ts->mutex);
348 ts->gpio = !!val; 360 val = ad7879_read(ts->bus, AD7879_REG_CTRL2);
349 error = ad7879_write(ts->bus, AD7879_REG_CTRL2,
350 ts->gpio ?
351 ts->cmd_crtl2 & ~AD7879_GPIO_DATA :
352 ts->cmd_crtl2 | AD7879_GPIO_DATA);
353 mutex_unlock(&ts->mutex); 361 mutex_unlock(&ts->mutex);
354 362
355 return error ? : count; 363 return !!(val & AD7879_GPIO_DATA);
356} 364}
357 365
358static DEVICE_ATTR(gpio, 0664, ad7879_gpio_show, ad7879_gpio_store); 366static void ad7879_gpio_set_value(struct gpio_chip *chip,
367 unsigned gpio, int value)
368{
369 struct ad7879 *ts = container_of(chip, struct ad7879, gc);
359 370
360static struct attribute *ad7879_attributes[] = { 371 mutex_lock(&ts->mutex);
361 &dev_attr_disable.attr, 372 if (value)
362 &dev_attr_gpio.attr, 373 ts->cmd_crtl2 |= AD7879_GPIO_DATA;
363 NULL 374 else
364}; 375 ts->cmd_crtl2 &= ~AD7879_GPIO_DATA;
365 376
366static const struct attribute_group ad7879_attr_group = { 377 ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
367 .attrs = ad7879_attributes, 378 mutex_unlock(&ts->mutex);
368}; 379}
380
381static int __devinit ad7879_gpio_add(struct device *dev)
382{
383 struct ad7879 *ts = dev_get_drvdata(dev);
384 struct ad7879_platform_data *pdata = dev->platform_data;
385 int ret = 0;
386
387 if (pdata->gpio_export) {
388 ts->gc.direction_input = ad7879_gpio_direction_input;
389 ts->gc.direction_output = ad7879_gpio_direction_output;
390 ts->gc.get = ad7879_gpio_get_value;
391 ts->gc.set = ad7879_gpio_set_value;
392 ts->gc.can_sleep = 1;
393 ts->gc.base = pdata->gpio_base;
394 ts->gc.ngpio = 1;
395 ts->gc.label = "AD7879-GPIO";
396 ts->gc.owner = THIS_MODULE;
397 ts->gc.dev = dev;
398
399 ret = gpiochip_add(&ts->gc);
400 if (ret)
401 dev_err(dev, "failed to register gpio %d\n",
402 ts->gc.base);
403 }
404
405 return ret;
406}
407
408/*
409 * We mark ad7879_gpio_remove inline so there is a chance the code
410 * gets discarded when not needed. We can't do __devinit/__devexit
411 * markup since it is used in both probe and remove methods.
412 */
413static inline void ad7879_gpio_remove(struct device *dev)
414{
415 struct ad7879 *ts = dev_get_drvdata(dev);
416 struct ad7879_platform_data *pdata = dev->platform_data;
417 int ret;
418
419 if (pdata->gpio_export) {
420 ret = gpiochip_remove(&ts->gc);
421 if (ret)
422 dev_err(dev, "failed to remove gpio %d\n",
423 ts->gc.base);
424 }
425}
426#else
427static inline int ad7879_gpio_add(struct device *dev)
428{
429 return 0;
430}
431
432static inline void ad7879_gpio_remove(struct device *dev)
433{
434}
435#endif
369 436
370static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) 437static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
371{ 438{
@@ -403,12 +470,6 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
403 ts->pen_down_acc_interval = pdata->pen_down_acc_interval; 470 ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
404 ts->median = pdata->median; 471 ts->median = pdata->median;
405 472
406 if (pdata->gpio_output)
407 ts->gpio_init = AD7879_GPIO_EN |
408 (pdata->gpio_default ? 0 : AD7879_GPIO_DATA);
409 else
410 ts->gpio_init = AD7879_GPIO_EN | AD7879_GPIODIR;
411
412 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev)); 473 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev));
413 474
414 input_dev->name = "AD7879 Touchscreen"; 475 input_dev->name = "AD7879 Touchscreen";
@@ -446,6 +507,23 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
446 goto err_free_mem; 507 goto err_free_mem;
447 } 508 }
448 509
510 ts->cmd_crtl3 = AD7879_YPLUS_BIT |
511 AD7879_XPLUS_BIT |
512 AD7879_Z2_BIT |
513 AD7879_Z1_BIT |
514 AD7879_TEMPMASK_BIT |
515 AD7879_AUXVBATMASK_BIT |
516 AD7879_GPIOALERTMASK_BIT;
517
518 ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR |
519 AD7879_AVG(ts->averaging) |
520 AD7879_MFS(ts->median) |
521 AD7879_FCD(ts->first_conversion_delay);
522
523 ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 |
524 AD7879_ACQ(ts->acquisition_time) |
525 AD7879_TMR(ts->pen_down_acc_interval);
526
449 ad7879_setup(ts); 527 ad7879_setup(ts);
450 528
451 err = request_irq(bus->irq, ad7879_irq, 529 err = request_irq(bus->irq, ad7879_irq,
@@ -460,15 +538,21 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
460 if (err) 538 if (err)
461 goto err_free_irq; 539 goto err_free_irq;
462 540
463 err = input_register_device(input_dev); 541 err = ad7879_gpio_add(&bus->dev);
464 if (err) 542 if (err)
465 goto err_remove_attr; 543 goto err_remove_attr;
466 544
545 err = input_register_device(input_dev);
546 if (err)
547 goto err_remove_gpio;
548
467 dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n", 549 dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n",
468 revid >> 8, bus->irq); 550 revid >> 8, bus->irq);
469 551
470 return 0; 552 return 0;
471 553
554err_remove_gpio:
555 ad7879_gpio_remove(&bus->dev);
472err_remove_attr: 556err_remove_attr:
473 sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group); 557 sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group);
474err_free_irq: 558err_free_irq:
@@ -481,6 +565,7 @@ err_free_mem:
481 565
482static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts) 566static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts)
483{ 567{
568 ad7879_gpio_remove(&bus->dev);
484 ad7879_disable(ts); 569 ad7879_disable(ts);
485 sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group); 570 sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group);
486 free_irq(ts->bus->irq, ts); 571 free_irq(ts->bus->irq, ts);
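The ad7879 changes replace the ad-hoc "gpio" sysfs attribute with a proper gpio_chip embedded in the driver state; each callback recovers the driver structure with container_of() and serialises register access under the existing mutex. A loose sketch of that pattern for a hypothetical one-pin expander (foo_* names are invented and the register write is stubbed out):

#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct foo_chip {
	struct mutex lock;
	u16 ctrl;             /* cached control register */
	struct gpio_chip gc;  /* embedded so callbacks can find us again */
};

static int foo_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct foo_chip *foo = container_of(chip, struct foo_chip, gc);
	int val;

	mutex_lock(&foo->lock);
	val = !!(foo->ctrl & BIT(0));  /* a real driver would read the device */
	mutex_unlock(&foo->lock);

	return val;
}

static void foo_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct foo_chip *foo = container_of(chip, struct foo_chip, gc);

	mutex_lock(&foo->lock);
	if (value)
		foo->ctrl |= BIT(0);
	else
		foo->ctrl &= ~BIT(0);
	/* write foo->ctrl back to the device here */
	mutex_unlock(&foo->lock);
}

static int foo_gpio_register(struct foo_chip *foo, int gpio_base)
{
	foo->gc.label = "foo-gpio";
	foo->gc.owner = THIS_MODULE;
	foo->gc.base = gpio_base;
	foo->gc.ngpio = 1;
	foo->gc.can_sleep = 1;   /* accesses go over a slow bus */
	foo->gc.get = foo_gpio_get;
	foo->gc.set = foo_gpio_set;

	return gpiochip_add(&foo->gc);
}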
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index a6624ad252c5..1a1420d7a828 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -3152,7 +3152,7 @@ static void
3152hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx, 3152hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx,
3153 int slot_rx, int bank_rx) 3153 int slot_rx, int bank_rx)
3154{ 3154{
3155 if (slot_rx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) { 3155 if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
3156 /* disable PCM */ 3156 /* disable PCM */
3157 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0); 3157 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0);
3158 return; 3158 return;
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 951c57b0a7e0..ede46581351a 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
179 * We assume the Guest has the same number of GDT entries as the 179 * We assume the Guest has the same number of GDT entries as the
180 * Host, otherwise we'd have to dynamically allocate the Guest GDT. 180 * Host, otherwise we'd have to dynamically allocate the Guest GDT.
181 */ 181 */
182 if (num >= ARRAY_SIZE(cpu->arch.gdt)) 182 if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
183 kill_guest(cpu, "too many gdt entries %i", num); 183 kill_guest(cpu, "too many gdt entries %i", num);
184 return;
185 }
184 186
185 /* Set it up, then fix it. */ 187 /* Set it up, then fix it. */
186 cpu->arch.gdt[num].a = lo; 188 cpu->arch.gdt[num].a = lo;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 96faa799b82a..f96feeb6b9ce 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -660,7 +660,7 @@ static int smu_platform_probe(struct of_device* dev,
660 return 0; 660 return 0;
661} 661}
662 662
663static struct of_device_id smu_platform_match[] = 663static const struct of_device_id smu_platform_match[] =
664{ 664{
665 { 665 {
666 .type = "smu", 666 .type = "smu",
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index ea32c7e5a9af..454bc501df3c 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2211,7 +2211,7 @@ static int fcu_of_remove(struct of_device* dev)
2211 return 0; 2211 return 0;
2212} 2212}
2213 2213
2214static struct of_device_id fcu_match[] = 2214static const struct of_device_id fcu_match[] =
2215{ 2215{
2216 { 2216 {
2217 .type = "fcu", 2217 .type = "fcu",
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3fbe41b0ac07..ba48fd76396e 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -457,7 +457,7 @@ therm_of_remove( struct of_device *dev )
457 return 0; 457 return 0;
458} 458}
459 459
460static struct of_device_id therm_of_match[] = {{ 460static const struct of_device_id therm_of_match[] = {{
461 .name = "fan", 461 .name = "fan",
462 .compatible = "adm1030" 462 .compatible = "adm1030"
463 }, {} 463 }, {}
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 54abf9e303b7..f1c8cae70b4b 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
172{ 172{
173 int r = 0; 173 int r = 0;
174 size_t dummy = 0; 174 size_t dummy = 0;
175 int overhead_size = 175 int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
176 sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
177 struct dm_ulog_request *tfr = prealloced_ulog_tfr; 176 struct dm_ulog_request *tfr = prealloced_ulog_tfr;
178 struct receiving_pkg pkg; 177 struct receiving_pkg pkg;
179 178
179 /*
180 * Given the space needed to hold the 'struct cn_msg' and
181 * 'struct dm_ulog_request' - do we have enough payload
182 * space remaining?
183 */
180 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { 184 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
181 DMINFO("Size of tfr exceeds preallocated size"); 185 DMINFO("Size of tfr exceeds preallocated size");
182 return -EINVAL; 186 return -EINVAL;
@@ -191,7 +195,7 @@ resend:
191 */ 195 */
192 mutex_lock(&dm_ulog_lock); 196 mutex_lock(&dm_ulog_lock);
193 197
194 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); 198 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
195 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 199 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
196 tfr->luid = luid; 200 tfr->luid = luid;
197 tfr->seq = dm_ulog_seq++; 201 tfr->seq = dm_ulog_seq++;
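The dm-log-userspace-transfer fix is a classic sizeof slip: the overhead was computed with sizeof(struct dm_ulog_request *), the size of a pointer, instead of the structure itself, so the payload bound was far too generous. A tiny user-space demonstration of the difference (the struct sizes are stand-ins, not the real dm/connector layouts):

#include <stddef.h>
#include <stdio.h>

struct hdr  { char bytes[32];  };  /* stands in for struct cn_msg */
struct body { char bytes[120]; };  /* stands in for struct dm_ulog_request */

int main(void)
{
	size_t wrong = sizeof(struct body *) + sizeof(struct hdr); /* pointer size! */
	size_t right = sizeof(struct body)   + sizeof(struct hdr);

	printf("wrong overhead = %zu bytes, right overhead = %zu bytes\n",
	       wrong, right);
	return 0;
}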
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ad779bd13aec..6c1046df81f6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
724 /* 724 /*
725 * Dispatch io. 725 * Dispatch io.
726 */ 726 */
727 if (unlikely(ms->log_failure)) { 727 if (unlikely(ms->log_failure) && errors_handled(ms)) {
728 spin_lock_irq(&ms->lock); 728 spin_lock_irq(&ms->lock);
729 bio_list_merge(&ms->failures, &sync); 729 bio_list_merge(&ms->failures, &sync);
730 spin_unlock_irq(&ms->lock); 730 spin_unlock_irq(&ms->lock);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 5f19ceb6fe91..168bd38f5006 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
660 spin_lock_irq(&rh->region_lock); 660 spin_lock_irq(&rh->region_lock);
661 if (success) 661 if (success)
662 list_add(&reg->list, &reg->rh->recovered_regions); 662 list_add(&reg->list, &reg->rh->recovered_regions);
663 else { 663 else
664 reg->state = DM_RH_NOSYNC;
665 list_add(&reg->list, &reg->rh->failed_recovered_regions); 664 list_add(&reg->list, &reg->rh->failed_recovered_regions);
666 } 665
667 spin_unlock_irq(&rh->region_lock); 666 spin_unlock_irq(&rh->region_lock);
668 667
669 rh->wakeup_workers(rh->context); 668 rh->wakeup_workers(rh->context);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 7d08879689ac..c097d8a4823d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
254 * Issue the synchronous I/O from a different thread 254 * Issue the synchronous I/O from a different thread
255 * to avoid generic_make_request recursion. 255 * to avoid generic_make_request recursion.
256 */ 256 */
257 INIT_WORK(&req.work, do_metadata); 257 INIT_WORK_ON_STACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_workqueue(ps->metadata_wq); 259 flush_workqueue(ps->metadata_wq);
260 260
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e0efc1adcaff..bd58703ee8f6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
110 } 110 }
111 111
112 stripes = simple_strtoul(argv[0], &end, 10); 112 stripes = simple_strtoul(argv[0], &end, 10);
113 if (*end) { 113 if (!stripes || *end) {
114 ti->error = "Invalid stripe count"; 114 ti->error = "Invalid stripe count";
115 return -EINVAL; 115 return -EINVAL;
116 } 116 }
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index f53392df7b97..f91b40942e07 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = {
80}; 80};
81 81
82/* 82/*
83 * The sysfs structure is embedded in md struct, nothing to do here
84 */
85static void dm_sysfs_release(struct kobject *kobj)
86{
87}
88
89/*
90 * dm kobject is embedded in mapped_device structure 83 * dm kobject is embedded in mapped_device structure
91 * no need to define release function here 84 * no need to define release function here
92 */ 85 */
93static struct kobj_type dm_ktype = { 86static struct kobj_type dm_ktype = {
94 .sysfs_ops = &dm_sysfs_ops, 87 .sysfs_ops = &dm_sysfs_ops,
95 .default_attrs = dm_attrs, 88 .default_attrs = dm_attrs,
96 .release = dm_sysfs_release
97}; 89};
98 90
99/* 91/*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index be625475cf6d..4b22feb01a0c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -503,16 +503,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
503 return 0; 503 return 0;
504 } 504 }
505 505
506 if (blk_stack_limits(limits, &q->limits, start << 9) < 0) 506 if (bdev_stack_limits(limits, bdev, start) < 0)
507 DMWARN("%s: target device %s is misaligned: " 507 DMWARN("%s: adding target device %s caused an alignment inconsistency: "
508 "physical_block_size=%u, logical_block_size=%u, " 508 "physical_block_size=%u, logical_block_size=%u, "
509 "alignment_offset=%u, start=%llu", 509 "alignment_offset=%u, start=%llu",
510 dm_device_name(ti->table->md), bdevname(bdev, b), 510 dm_device_name(ti->table->md), bdevname(bdev, b),
511 q->limits.physical_block_size, 511 q->limits.physical_block_size,
512 q->limits.logical_block_size, 512 q->limits.logical_block_size,
513 q->limits.alignment_offset, 513 q->limits.alignment_offset,
514 (unsigned long long) start << 9); 514 (unsigned long long) start << SECTOR_SHIFT);
515
516 515
517 /* 516 /*
518 * Check if merge fn is supported. 517 * Check if merge fn is supported.
@@ -1026,9 +1025,9 @@ combine_limits:
1026 * for the table. 1025 * for the table.
1027 */ 1026 */
1028 if (blk_stack_limits(limits, &ti_limits, 0) < 0) 1027 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1029 DMWARN("%s: target device " 1028 DMWARN("%s: adding target device "
1030 "(start sect %llu len %llu) " 1029 "(start sect %llu len %llu) "
1031 "is misaligned", 1030 "caused an alignment inconsistency",
1032 dm_device_name(table->md), 1031 dm_device_name(table->md),
1033 (unsigned long long) ti->begin, 1032 (unsigned long long) ti->begin,
1034 (unsigned long long) ti->len); 1033 (unsigned long long) ti->len);
@@ -1080,15 +1079,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1080 struct queue_limits *limits) 1079 struct queue_limits *limits)
1081{ 1080{
1082 /* 1081 /*
1083 * Each target device in the table has a data area that should normally
1084 * be aligned such that the DM device's alignment_offset is 0.
1085 * FIXME: Propagate alignment_offsets up the stack and warn of
1086 * sub-optimal or inconsistent settings.
1087 */
1088 limits->alignment_offset = 0;
1089 limits->misaligned = 0;
1090
1091 /*
1092 * Copy table's limits to the DM device's request_queue 1082 * Copy table's limits to the DM device's request_queue
1093 */ 1083 */
1094 q->limits = *limits; 1084 q->limits = *limits;
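In the dm-table hunk, blk_stack_limits() with a byte offset (start << 9) is replaced by bdev_stack_limits(), which, as I read it, takes the offset in sectors and also folds in the partition's own start sector; the warning text then prints the byte offset via SECTOR_SHIFT. A one-liner illustrating the sectors-to-bytes conversion that SECTOR_SHIFT encodes (the value is made up):

#include <stdio.h>

#define SECTOR_SHIFT 9   /* 1 sector = 512 bytes */

int main(void)
{
	unsigned long long start_sector = 63;  /* a typical legacy partition start */

	printf("start = %llu sectors = %llu bytes\n",
	       start_sector, start_sector << SECTOR_SHIFT);
	return 0;
}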
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3167480b532c..aa4e2aa86d49 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
1595 return BLKPREP_OK; 1595 return BLKPREP_OK;
1596} 1596}
1597 1597
1598static void map_request(struct dm_target *ti, struct request *clone, 1598/*
1599 struct mapped_device *md) 1599 * Returns:
1600 * 0 : the request has been processed (not requeued)
1601 * !0 : the request has been requeued
1602 */
1603static int map_request(struct dm_target *ti, struct request *clone,
1604 struct mapped_device *md)
1600{ 1605{
1601 int r; 1606 int r, requeued = 0;
1602 struct dm_rq_target_io *tio = clone->end_io_data; 1607 struct dm_rq_target_io *tio = clone->end_io_data;
1603 1608
1604 /* 1609 /*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
1625 case DM_MAPIO_REQUEUE: 1630 case DM_MAPIO_REQUEUE:
1626 /* The target wants to requeue the I/O */ 1631 /* The target wants to requeue the I/O */
1627 dm_requeue_unmapped_request(clone); 1632 dm_requeue_unmapped_request(clone);
1633 requeued = 1;
1628 break; 1634 break;
1629 default: 1635 default:
1630 if (r > 0) { 1636 if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
1636 dm_kill_unmapped_request(clone, r); 1642 dm_kill_unmapped_request(clone, r);
1637 break; 1643 break;
1638 } 1644 }
1645
1646 return requeued;
1639} 1647}
1640 1648
1641/* 1649/*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
1677 atomic_inc(&md->pending[rq_data_dir(clone)]); 1685 atomic_inc(&md->pending[rq_data_dir(clone)]);
1678 1686
1679 spin_unlock(q->queue_lock); 1687 spin_unlock(q->queue_lock);
1680 map_request(ti, clone, md); 1688 if (map_request(ti, clone, md))
1689 goto requeued;
1690
1681 spin_lock_irq(q->queue_lock); 1691 spin_lock_irq(q->queue_lock);
1682 } 1692 }
1683 1693
1684 goto out; 1694 goto out;
1685 1695
1696requeued:
1697 spin_lock_irq(q->queue_lock);
1698
1686plug_and_out: 1699plug_and_out:
1687 if (!elv_queue_empty(q)) 1700 if (!elv_queue_empty(q))
1688 /* Some requests still remain, retry later */ 1701 /* Some requests still remain, retry later */
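map_request() in dm.c now reports whether the clone was requeued so dm_request_fn() can stop pulling further requests and re-take the queue lock on a dedicated path. A toy model of that loop contract (pure user-space illustration of the control flow, nothing here is the real block-layer API):

#include <stdbool.h>
#include <stdio.h>

static bool map_one(int rq)
{
	bool requeued = (rq % 3 == 0);  /* pretend every third request bounces */

	printf("request %d %s\n", rq, requeued ? "requeued" : "dispatched");
	return requeued;
}

int main(void)
{
	for (int rq = 1; rq <= 10; rq++) {
		if (map_one(rq)) {
			/* corresponds to 'goto requeued': stop dispatching,
			 * re-take the queue lock, fall through to plug_and_out */
			break;
		}
	}
	return 0;
}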
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f4f5f82f9f53..a20a71e5efd3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -386,7 +386,9 @@ static void mddev_put(mddev_t *mddev)
386 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 386 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
387 return; 387 return;
388 if (!mddev->raid_disks && list_empty(&mddev->disks) && 388 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
389 !mddev->hold_active) { 389 mddev->ctime == 0 && !mddev->hold_active) {
390 /* Array is not configured at all, and not held active,
391 * so destroy it */
390 list_del(&mddev->all_mddevs); 392 list_del(&mddev->all_mddevs);
391 if (mddev->gendisk) { 393 if (mddev->gendisk) {
392 /* we did a probe so need to clean up. 394 /* we did a probe so need to clean up.
@@ -4073,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
4073{ 4075{
4074 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4076 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4075 4077
4076 if (mddev->private == &md_redundancy_group) { 4078 if (mddev->private) {
4077 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4080 if (mddev->private != (void*)1)
4081 sysfs_remove_group(&mddev->kobj, mddev->private);
4078 if (mddev->sysfs_action) 4082 if (mddev->sysfs_action)
4079 sysfs_put(mddev->sysfs_action); 4083 sysfs_put(mddev->sysfs_action);
4080 mddev->sysfs_action = NULL; 4084 mddev->sysfs_action = NULL;
@@ -4285,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
4285 sysfs_notify_dirent(rdev->sysfs_state); 4289 sysfs_notify_dirent(rdev->sysfs_state);
4286 } 4290 }
4287 4291
4288 md_probe(mddev->unit, NULL, NULL);
4289 disk = mddev->gendisk; 4292 disk = mddev->gendisk;
4290 if (!disk)
4291 return -ENOMEM;
4292 4293
4293 spin_lock(&pers_lock); 4294 spin_lock(&pers_lock);
4294 pers = find_pers(mddev->level, mddev->clevel); 4295 pers = find_pers(mddev->level, mddev->clevel);
@@ -4355,7 +4356,7 @@ static int do_md_run(mddev_t * mddev)
4355 mddev->barriers_work = 1; 4356 mddev->barriers_work = 1;
4356 mddev->ok_start_degraded = start_dirty_degraded; 4357 mddev->ok_start_degraded = start_dirty_degraded;
4357 4358
4358 if (start_readonly) 4359 if (start_readonly && mddev->ro == 0)
4359 mddev->ro = 2; /* read-only, but switch on first write */ 4360 mddev->ro = 2; /* read-only, but switch on first write */
4360 4361
4361 err = mddev->pers->run(mddev); 4362 err = mddev->pers->run(mddev);
@@ -4419,33 +4420,6 @@ static int do_md_run(mddev_t * mddev)
4419 4420
4420 set_capacity(disk, mddev->array_sectors); 4421 set_capacity(disk, mddev->array_sectors);
4421 4422
4422 /* If there is a partially-recovered drive we need to
4423 * start recovery here. If we leave it to md_check_recovery,
4424 * it will remove the drives and not do the right thing
4425 */
4426 if (mddev->degraded && !mddev->sync_thread) {
4427 int spares = 0;
4428 list_for_each_entry(rdev, &mddev->disks, same_set)
4429 if (rdev->raid_disk >= 0 &&
4430 !test_bit(In_sync, &rdev->flags) &&
4431 !test_bit(Faulty, &rdev->flags))
4432 /* complete an interrupted recovery */
4433 spares++;
4434 if (spares && mddev->pers->sync_request) {
4435 mddev->recovery = 0;
4436 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4437 mddev->sync_thread = md_register_thread(md_do_sync,
4438 mddev,
4439 "resync");
4440 if (!mddev->sync_thread) {
4441 printk(KERN_ERR "%s: could not start resync"
4442 " thread...\n",
4443 mdname(mddev));
4444 /* leave the spares where they are, it shouldn't hurt */
4445 mddev->recovery = 0;
4446 }
4447 }
4448 }
4449 md_wakeup_thread(mddev->thread); 4423 md_wakeup_thread(mddev->thread);
4450 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4424 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4451 4425
@@ -4555,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4555 mddev->queue->unplug_fn = NULL; 4529 mddev->queue->unplug_fn = NULL;
4556 mddev->queue->backing_dev_info.congested_fn = NULL; 4530 mddev->queue->backing_dev_info.congested_fn = NULL;
4557 module_put(mddev->pers->owner); 4531 module_put(mddev->pers->owner);
4558 if (mddev->pers->sync_request) 4532 if (mddev->pers->sync_request && mddev->private == NULL)
4559 mddev->private = &md_redundancy_group; 4533 mddev->private = (void*)1;
4560 mddev->pers = NULL; 4534 mddev->pers = NULL;
4561 /* tell userspace to handle 'inactive' */ 4535 /* tell userspace to handle 'inactive' */
4562 sysfs_notify_dirent(mddev->sysfs_state); 4536 sysfs_notify_dirent(mddev->sysfs_state);
@@ -4603,9 +4577,6 @@ out:
4603 } 4577 }
4604 mddev->bitmap_info.offset = 0; 4578 mddev->bitmap_info.offset = 0;
4605 4579
4606 /* make sure all md_delayed_delete calls have finished */
4607 flush_scheduled_work();
4608
4609 export_array(mddev); 4580 export_array(mddev);
4610 4581
4611 mddev->array_sectors = 0; 4582 mddev->array_sectors = 0;
@@ -5262,6 +5233,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5262 mddev->minor_version = info->minor_version; 5233 mddev->minor_version = info->minor_version;
5263 mddev->patch_version = info->patch_version; 5234 mddev->patch_version = info->patch_version;
5264 mddev->persistent = !info->not_persistent; 5235 mddev->persistent = !info->not_persistent;
5236 /* ensure mddev_put doesn't delete this now that there
5237 * is some minimal configuration.
5238 */
5239 mddev->ctime = get_seconds();
5265 return 0; 5240 return 0;
5266 } 5241 }
5267 mddev->major_version = MD_MAJOR_VERSION; 5242 mddev->major_version = MD_MAJOR_VERSION;
@@ -6494,10 +6469,11 @@ void md_do_sync(mddev_t *mddev)
6494 mddev->curr_resync = 2; 6469 mddev->curr_resync = 2;
6495 6470
6496 try_again: 6471 try_again:
6497 if (kthread_should_stop()) { 6472 if (kthread_should_stop())
6498 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6473 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6474
6475 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6499 goto skip; 6476 goto skip;
6500 }
6501 for_each_mddev(mddev2, tmp) { 6477 for_each_mddev(mddev2, tmp) {
6502 if (mddev2 == mddev) 6478 if (mddev2 == mddev)
6503 continue; 6479 continue;
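As I read the md.c hunks, mddev->private becomes a small three-way convention: NULL means nothing to clean up, the magic value (void *)1 means only the shared md_redundancy_group must be removed, and any other pointer is a personality attribute group whose removal is deferred to mddev_delayed_delete() (raid5's stop() relies on this below). A toy model of that sentinel-pointer convention with invented names:

#include <stdio.h>

static char raid5_attrs_group[] = "raid5_attrs_group";

static void delayed_delete(void *private)
{
	if (private) {
		printf("remove md_redundancy_group\n");
		if (private != (void *)1)
			printf("remove personality group: %s\n",
			       (const char *)private);
	}
}

int main(void)
{
	delayed_delete(NULL);               /* nothing was registered */
	delayed_delete((void *)1);          /* only the redundancy group */
	delayed_delete(raid5_attrs_group);  /* raid5 defers its own group too */
	return 0;
}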
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204eb12df..ceb24afdc147 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
5136 mddev->thread = NULL; 5136 mddev->thread = NULL;
5137 mddev->queue->backing_dev_info.congested_fn = NULL; 5137 mddev->queue->backing_dev_info.congested_fn = NULL;
5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5139 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
5140 free_conf(conf); 5139 free_conf(conf);
5141 mddev->private = NULL; 5140 mddev->private = &raid5_attrs_group;
5142 return 0; 5141 return 0;
5143} 5142}
5144 5143
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
5464 !test_bit(Faulty, &rdev->flags)) { 5463 !test_bit(Faulty, &rdev->flags)) {
5465 if (raid5_add_disk(mddev, rdev) == 0) { 5464 if (raid5_add_disk(mddev, rdev) == 0) {
5466 char nm[20]; 5465 char nm[20];
5467 if (rdev->raid_disk >= conf->previous_raid_disks) 5466 if (rdev->raid_disk >= conf->previous_raid_disks) {
5468 set_bit(In_sync, &rdev->flags); 5467 set_bit(In_sync, &rdev->flags);
5469 else 5468 added_devices++;
5469 } else
5470 rdev->recovery_offset = 0; 5470 rdev->recovery_offset = 0;
5471 added_devices++;
5472 sprintf(nm, "rd%d", rdev->raid_disk); 5471 sprintf(nm, "rd%d", rdev->raid_disk);
5473 if (sysfs_create_link(&mddev->kobj, 5472 if (sysfs_create_link(&mddev->kobj,
5474 &rdev->kobj, nm)) 5473 &rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
5480 break; 5479 break;
5481 } 5480 }
5482 5481
5482 /* When a reshape changes the number of devices, ->degraded
5483 * is measured against the large of the pre and post number of
5484 * devices.*/
5483 if (mddev->delta_disks > 0) { 5485 if (mddev->delta_disks > 0) {
5484 spin_lock_irqsave(&conf->device_lock, flags); 5486 spin_lock_irqsave(&conf->device_lock, flags);
5485 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 5487 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5486 - added_devices; 5488 - added_devices;
5487 spin_unlock_irqrestore(&conf->device_lock, flags); 5489 spin_unlock_irqrestore(&conf->device_lock, flags);
5488 } 5490 }
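The raid5 reshape hunk changes two things: added_devices now only counts disks that were actually marked In_sync, and ->degraded is incremented by the shortfall rather than overwritten, since during a grow it is measured against the larger device count. A small worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	int previous_raid_disks = 4, raid_disks = 6;  /* growing 4 -> 6 disks */
	int degraded = 1;              /* one old member had already failed */
	int added_devices = 2;         /* both new slots got working spares */

	/* the old code assigned here and lost the pre-existing failure */
	degraded += (raid_disks - previous_raid_disks) - added_devices;

	printf("degraded = %d\n", degraded);   /* prints 1, not 0 */
	return 0;
}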
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index bff7a5356037..b521ed9d6e2e 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -13,7 +13,7 @@
13 */ 13 */
14 14
15 15
16#include <linux/usb/input.h> 16#include <linux/input.h>
17#include <media/ir-common.h> 17#include <media/ir-common.h>
18 18
19#define IR_TAB_MIN_SIZE 32 19#define IR_TAB_MIN_SIZE 32
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index becbaadb3b77..5ed75263340a 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1333,9 +1333,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
1333 1333
1334 DEB_CAP(("vbuf:%p\n",vb)); 1334 DEB_CAP(("vbuf:%p\n",vb));
1335 1335
1336 release_all_pagetables(dev, buf);
1337
1338 saa7146_dma_free(dev,q,buf); 1336 saa7146_dma_free(dev,q,buf);
1337
1338 release_all_pagetables(dev, buf);
1339} 1339}
1340 1340
1341static struct videobuf_queue_ops video_qops = { 1341static struct videobuf_queue_ops video_qops = {
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c190b0dedee4..2833137fa819 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -144,7 +144,8 @@ static void set_audio(struct dvb_frontend *fe,
144 } 144 }
145 145
146 if (params->mode == V4L2_TUNER_RADIO) { 146 if (params->mode == V4L2_TUNER_RADIO) {
147 priv->tda8290_easy_mode = 0x01; /* Start with MN values */ 147 /* Set TDA8295 to FM radio; Start TDA8290 with MN values */
148 priv->tda8290_easy_mode = (priv->ver & TDA8295) ? 0x80 : 0x01;
148 tuner_dbg("setting to radio FM\n"); 149 tuner_dbg("setting to radio FM\n");
149 } else { 150 } else {
150 tuner_dbg("setting tda829x to system %s\n", mode); 151 tuner_dbg("setting tda829x to system %s\n", mode);
@@ -672,16 +673,19 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
672static int tda8295_probe(struct tuner_i2c_props *i2c_props) 673static int tda8295_probe(struct tuner_i2c_props *i2c_props)
673{ 674{
674#define TDA8295_ID 0x8a 675#define TDA8295_ID 0x8a
676#define TDA8295C2_ID 0x8b
675 unsigned char tda8295_id[] = { 0x2f, 0x00 }; 677 unsigned char tda8295_id[] = { 0x2f, 0x00 };
676 678
677 /* detect tda8295 */ 679 /* detect tda8295 */
678 tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1); 680 tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1);
679 tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1); 681 tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1);
680 682
681 if (tda8295_id[1] == TDA8295_ID) { 683 if ((tda8295_id[1] & 0xfe) == TDA8295_ID) {
682 if (debug) 684 if (debug)
683 printk(KERN_DEBUG "%s: tda8295 detected @ %d-%04x\n", 685 printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
684 __func__, i2c_adapter_id(i2c_props->adap), 686 __func__, (tda8295_id[1] == TDA8295_ID) ?
687 "tda8295c1" : "tda8295c2",
688 i2c_adapter_id(i2c_props->adap),
685 i2c_props->addr); 689 i2c_props->addr);
686 return 0; 690 return 0;
687 } 691 }
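The tda8290.c probe change masks the low bit of the ID register so both the TDA8295 (0x8a) and TDA8295C2 (0x8b) silicon revisions are accepted, and the log message names the revision. A quick standalone illustration of that mask-and-compare (the ID values come from the hunk, the rest is scaffolding):

#include <stdio.h>

#define TDA8295_ID   0x8a
#define TDA8295C2_ID 0x8b

int main(void)
{
	unsigned char ids[] = { 0x8a, 0x8b, 0x8c };

	for (int i = 0; i < 3; i++) {
		if ((ids[i] & 0xfe) == TDA8295_ID)  /* folds C1 and C2 together */
			printf("0x%02x -> %s\n", ids[i],
			       ids[i] == TDA8295_ID ? "tda8295c1" : "tda8295c2");
		else
			printf("0x%02x -> no match\n", ids[i]);
	}
	return 0;
}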
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index 35d0817126e9..cf8f65f309da 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -72,6 +72,10 @@ comment "Supported Earthsoft PT1 Adapters"
72 depends on DVB_CORE && PCI && I2C 72 depends on DVB_CORE && PCI && I2C
73source "drivers/media/dvb/pt1/Kconfig" 73source "drivers/media/dvb/pt1/Kconfig"
74 74
75comment "Supported Mantis Adapters"
76 depends on DVB_CORE && PCI && I2C
77 source "drivers/media/dvb/mantis/Kconfig"
78
75comment "Supported DVB Frontends" 79comment "Supported DVB Frontends"
76 depends on DVB_CORE 80 depends on DVB_CORE
77source "drivers/media/dvb/frontends/Kconfig" 81source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index 16d262ddb45d..c12922c3659b 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -2,6 +2,18 @@
2# Makefile for the kernel multimedia device drivers. 2# Makefile for the kernel multimedia device drivers.
3# 3#
4 4
5obj-y := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ dvb-usb/ pluto2/ siano/ dm1105/ pt1/ 5obj-y := dvb-core/ \
6 frontends/ \
7 ttpci/ \
8 ttusb-dec/ \
9 ttusb-budget/ \
10 b2c2/ \
11 bt8xx/ \
12 dvb-usb/ \
13 pluto2/ \
14 siano/ \
15 dm1105/ \
16 pt1/ \
17 mantis/
6 18
7obj-$(CONFIG_DVB_FIREDTV) += firewire/ 19obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c37790ad92d0..9ddc57909d49 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); 761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
762 dmxdevfilter->type = DMXDEV_TYPE_NONE; 762 dmxdevfilter->type = DMXDEV_TYPE_NONE;
763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); 763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
764 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
765 init_timer(&dmxdevfilter->timer); 764 init_timer(&dmxdevfilter->timer);
766 765
767 dvbdev->users++; 766 dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
887 dmxdevfilter->type = DMXDEV_TYPE_PES; 886 dmxdevfilter->type = DMXDEV_TYPE_PES;
888 memcpy(&dmxdevfilter->params, params, 887 memcpy(&dmxdevfilter->params, params,
889 sizeof(struct dmx_pes_filter_params)); 888 sizeof(struct dmx_pes_filter_params));
889 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
890 890
891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
892 892
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b78cfb7d1897..67f189b7aa1f 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
426 }; 426 };
427 }; 427 };
428 428
429 if (dvb_demux_tscheck) { 429 if (demux->cnt_storage) {
430 if (!demux->cnt_storage)
431 demux->cnt_storage = vmalloc(MAX_PID + 1);
432
433 if (!demux->cnt_storage) {
434 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
435 dvb_demux_tscheck = 0;
436 goto no_dvb_demux_tscheck;
437 }
438
439 /* check pkt counter */ 430 /* check pkt counter */
440 if (pid < MAX_PID) { 431 if (pid < MAX_PID) {
441 if (buf[1] & 0x80) 432 if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
454 }; 445 };
455 /* end check */ 446 /* end check */
456 }; 447 };
457no_dvb_demux_tscheck:
458 448
459 list_for_each_entry(feed, &demux->feed_list, list_head) { 449 list_for_each_entry(feed, &demux->feed_list, list_head) {
460 if ((feed->pid != pid) && (feed->pid != 0x2000)) 450 if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1246 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); 1236 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
1247 if (!dvbdemux->feed) { 1237 if (!dvbdemux->feed) {
1248 vfree(dvbdemux->filter); 1238 vfree(dvbdemux->filter);
1239 dvbdemux->filter = NULL;
1249 return -ENOMEM; 1240 return -ENOMEM;
1250 } 1241 }
1251 for (i = 0; i < dvbdemux->filternum; i++) { 1242 for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1257 dvbdemux->feed[i].index = i; 1248 dvbdemux->feed[i].index = i;
1258 } 1249 }
1259 1250
1251 if (dvb_demux_tscheck) {
1252 dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
1253
1254 if (!dvbdemux->cnt_storage)
1255 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
1256 }
1257
1260 INIT_LIST_HEAD(&dvbdemux->frontend_list); 1258 INIT_LIST_HEAD(&dvbdemux->frontend_list);
1261 1259
1262 for (i = 0; i < DMX_TS_PES_OTHER; i++) { 1260 for (i = 0; i < DMX_TS_PES_OTHER; i++) {
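The dvb_demux change moves the TS/TEI counter-storage allocation out of the per-packet software filter and into dvb_dmx_init(); the hot path is left with a single pointer test. A simplified user-space sketch of allocate-once, check-in-the-hot-path (the continuity check is heavily trimmed compared to the real filter):

#include <stdio.h>
#include <stdlib.h>

#define MAX_PID 0x1fff

static unsigned char *cnt_storage;

static void demux_init(int tscheck)
{
	if (tscheck) {
		cnt_storage = calloc(MAX_PID + 1, 1);
		if (!cnt_storage)
			fprintf(stderr, "TS/TEI check disabled (no memory)\n");
	}
}

static void filter_packet(unsigned pid, unsigned cc)
{
	if (cnt_storage && pid < MAX_PID) {         /* hot path: no allocation */
		if (((cnt_storage[pid] + 1) & 0x0f) != (cc & 0x0f))
			fprintf(stderr, "pid 0x%04x: continuity error\n", pid);
		cnt_storage[pid] = cc;
	}
}

int main(void)
{
	demux_init(1);
	filter_packet(0x100, 1);
	filter_packet(0x100, 3);   /* skipped counter value -> warning */
	free(cnt_storage);
	return 0;
}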
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index fe44789ab037..6223bf01efe9 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -202,14 +202,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request,
202 unsigned long flags; 202 unsigned long flags;
203 int su; 203 int su;
204 204
205 if ((tcode != TCODE_WRITE_QUADLET_REQUEST && 205 if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0)
206 tcode != TCODE_WRITE_BLOCK_REQUEST) ||
207 offset != CSR_REGISTER_BASE + CSR_FCP_RESPONSE ||
208 length == 0 ||
209 (((u8 *)payload)[0] & 0xf0) != 0) {
210 fw_send_response(card, request, RCODE_TYPE_ERROR);
211 return; 206 return;
212 }
213 207
214 su = ((u8 *)payload)[1] & 0x7; 208 su = ((u8 *)payload)[1] & 0x7;
215 209
@@ -230,10 +224,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request,
230 } 224 }
231 spin_unlock_irqrestore(&node_list_lock, flags); 225 spin_unlock_irqrestore(&node_list_lock, flags);
232 226
233 if (fdtv) { 227 if (fdtv)
234 avc_recv(fdtv, payload, length); 228 avc_recv(fdtv, payload, length);
235 fw_send_response(card, request, RCODE_COMPLETE);
236 }
237} 229}
238 230
239static struct fw_address_handler fcp_handler = { 231static struct fw_address_handler fcp_handler = {
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index a3b8b697349b..cd7f9b7cbffa 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -208,6 +208,14 @@ config DVB_DS3000
208 help 208 help
209 A DVB-S/S2 tuner module. Say Y when you want to support this frontend. 209 A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
210 210
211config DVB_MB86A16
212 tristate "Fujitsu MB86A16 based"
213 depends on DVB_CORE && I2C
214 default m if DVB_FE_CUSTOMISE
215 help
 216 A DVB-S/DSS Direct Conversion receiver.
217 Say Y when you want to support this frontend.
218
211comment "DVB-T (terrestrial) frontends" 219comment "DVB-T (terrestrial) frontends"
212 depends on DVB_CORE 220 depends on DVB_CORE
213 221
@@ -587,6 +595,17 @@ config DVB_ATBM8830
587 help 595 help
588 A DMB-TH tuner module. Say Y when you want to support this frontend. 596 A DMB-TH tuner module. Say Y when you want to support this frontend.
589 597
598config DVB_TDA665x
599 tristate "TDA665x tuner"
600 depends on DVB_CORE && I2C
601 default m if DVB_FE_CUSTOMISE
602 help
603 Support for tuner modules based on Philips TDA6650/TDA6651 chips.
604 Say Y when you want to support this chip.
605
606 Currently supported tuners:
607 * Panasonic ENV57H12D5 (ET-50DT)
608
590comment "Tools to develop new frontends" 609comment "Tools to develop new frontends"
591 610
592config DVB_DUMMY_FE 611config DVB_DUMMY_FE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 47575cc7b699..874e8ada4d1d 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_DVB_TDA10048) += tda10048.o
64obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o 64obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
65obj-$(CONFIG_DVB_S5H1411) += s5h1411.o 65obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
66obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o 66obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o
67obj-$(CONFIG_DVB_TDA665x) += tda665x.o
67obj-$(CONFIG_DVB_LGS8GXX) += lgs8gxx.o 68obj-$(CONFIG_DVB_LGS8GXX) += lgs8gxx.o
68obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o 69obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
69obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o 70obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
@@ -80,3 +81,4 @@ obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
80obj-$(CONFIG_DVB_ISL6423) += isl6423.o 81obj-$(CONFIG_DVB_ISL6423) += isl6423.o
81obj-$(CONFIG_DVB_EC100) += ec100.o 82obj-$(CONFIG_DVB_EC100) += ec100.o
82obj-$(CONFIG_DVB_DS3000) += ds3000.o 83obj-$(CONFIG_DVB_DS3000) += ds3000.o
84obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index d99619ae983c..b1ee20799639 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -100,7 +100,7 @@ static inline int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_
100static inline enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe) 100static inline enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
101{ 101{
102 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 102 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
103 return CT_SHUTDOWN, 103 return CT_SHUTDOWN;
104} 104}
105static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe) 105static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
106{ 106{
diff --git a/drivers/media/dvb/frontends/lgdt3305.h b/drivers/media/dvb/frontends/lgdt3305.h
index 4fa6e52d1fe8..9cb11c9cae53 100644
--- a/drivers/media/dvb/frontends/lgdt3305.h
+++ b/drivers/media/dvb/frontends/lgdt3305.h
@@ -54,13 +54,13 @@ struct lgdt3305_config {
54 u16 usref_qam256; /* default: 0x2a80 */ 54 u16 usref_qam256; /* default: 0x2a80 */
55 55
56 /* disable i2c repeater - 0:repeater enabled 1:repeater disabled */ 56 /* disable i2c repeater - 0:repeater enabled 1:repeater disabled */
57 int deny_i2c_rptr:1; 57 unsigned int deny_i2c_rptr:1;
58 58
59 /* spectral inversion - 0:disabled 1:enabled */ 59 /* spectral inversion - 0:disabled 1:enabled */
60 int spectral_inversion:1; 60 unsigned int spectral_inversion:1;
61 61
62 /* use RF AGC loop - 0:disabled 1:enabled */ 62 /* use RF AGC loop - 0:disabled 1:enabled */
63 int rf_agc_loop:1; 63 unsigned int rf_agc_loop:1;
64 64
65 enum lgdt3305_mpeg_mode mpeg_mode; 65 enum lgdt3305_mpeg_mode mpeg_mode;
66 enum lgdt3305_tp_clock_edge tpclk_edge; 66 enum lgdt3305_tp_clock_edge tpclk_edge;
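The lgdt3305.h change matters because a one-bit signed bitfield can only hold 0 and -1 (plain int bitfields are signed on the usual ABIs), so assigning 1 and later comparing against 1 never matches; making the fields unsigned restores the expected 0/1 behaviour. A short demonstration:

#include <stdio.h>

struct cfg {
	signed int   s:1;  /* what a plain 'int x:1' usually behaves like */
	unsigned int u:1;
};

int main(void)
{
	struct cfg c = { .s = 1, .u = 1 };

	printf("s = %d, u = %u\n", c.s, c.u);               /* s = -1, u = 1 */
	printf("s == 1 ? %s\n", c.s == 1 ? "yes" : "no");   /* no */
	return 0;
}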
diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
new file mode 100644
index 000000000000..d05f7500e0c5
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16.c
@@ -0,0 +1,1878 @@
1/*
2 Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25
26#include "dvb_frontend.h"
27#include "mb86a16.h"
28#include "mb86a16_priv.h"
29
30unsigned int verbose = 5;
31module_param(verbose, int, 0644);
32
 33#define ABS(x) ((x) < 0 ? (-(x)) : (x))
34
35struct mb86a16_state {
36 struct i2c_adapter *i2c_adap;
37 const struct mb86a16_config *config;
38 struct dvb_frontend frontend;
39
40 /* tuning parameters */
41 int frequency;
42 int srate;
43
44 /* Internal stuff */
45 int master_clk;
46 int deci;
47 int csel;
48 int rsel;
49};
50
51#define MB86A16_ERROR 0
52#define MB86A16_NOTICE 1
53#define MB86A16_INFO 2
54#define MB86A16_DEBUG 3
55
56#define dprintk(x, y, z, format, arg...) do { \
57 if (z) { \
58 if ((x > MB86A16_ERROR) && (x > y)) \
59 printk(KERN_ERR "%s: " format "\n", __func__, ##arg); \
60 else if ((x > MB86A16_NOTICE) && (x > y)) \
61 printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \
62 else if ((x > MB86A16_INFO) && (x > y)) \
63 printk(KERN_INFO "%s: " format "\n", __func__, ##arg); \
64 else if ((x > MB86A16_DEBUG) && (x > y)) \
65 printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg); \
66 } else { \
67 if (x > y) \
68 printk(format, ##arg); \
69 } \
70} while (0)
71
72#define TRACE_IN dprintk(verbose, MB86A16_DEBUG, 1, "-->()")
73#define TRACE_OUT dprintk(verbose, MB86A16_DEBUG, 1, "()-->")
74
75static int mb86a16_write(struct mb86a16_state *state, u8 reg, u8 val)
76{
77 int ret;
78 u8 buf[] = { reg, val };
79
80 struct i2c_msg msg = {
81 .addr = state->config->demod_address,
82 .flags = 0,
83 .buf = buf,
84 .len = 2
85 };
86
87 dprintk(verbose, MB86A16_DEBUG, 1,
88 "writing to [0x%02x],Reg[0x%02x],Data[0x%02x]",
89 state->config->demod_address, buf[0], buf[1]);
90
91 ret = i2c_transfer(state->i2c_adap, &msg, 1);
92
93 return (ret != 1) ? -EREMOTEIO : 0;
94}
95
96static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val)
97{
98 int ret;
99 u8 b0[] = { reg };
100 u8 b1[] = { 0 };
101
102 struct i2c_msg msg[] = {
103 {
104 .addr = state->config->demod_address,
105 .flags = 0,
106 .buf = b0,
107 .len = 1
108 }, {
109 .addr = state->config->demod_address,
110 .flags = I2C_M_RD,
111 .buf = b1,
112 .len = 1
113 }
114 };
115 ret = i2c_transfer(state->i2c_adap, msg, 2);
116 if (ret != 2) {
 117 dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=%i)",
118 reg, ret);
119
120 return -EREMOTEIO;
121 }
122 *val = b1[0];
123
124 return ret;
125}
126
127static int CNTM_set(struct mb86a16_state *state,
128 unsigned char timint1,
129 unsigned char timint2,
130 unsigned char cnext)
131{
132 unsigned char val;
133
134 val = (timint1 << 4) | (timint2 << 2) | cnext;
135 if (mb86a16_write(state, MB86A16_CNTMR, val) < 0)
136 goto err;
137
138 return 0;
139
140err:
141 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
142 return -EREMOTEIO;
143}
144
145static int smrt_set(struct mb86a16_state *state, int rate)
146{
147 int tmp ;
148 int m ;
149 unsigned char STOFS0, STOFS1;
150
151 m = 1 << state->deci;
152 tmp = (8192 * state->master_clk - 2 * m * rate * 8192 + state->master_clk / 2) / state->master_clk;
153
154 STOFS0 = tmp & 0x0ff;
155 STOFS1 = (tmp & 0xf00) >> 8;
156
157 if (mb86a16_write(state, MB86A16_SRATE1, (state->deci << 2) |
158 (state->csel << 1) |
159 state->rsel) < 0)
160 goto err;
161 if (mb86a16_write(state, MB86A16_SRATE2, STOFS0) < 0)
162 goto err;
163 if (mb86a16_write(state, MB86A16_SRATE3, STOFS1) < 0)
164 goto err;
165
166 return 0;
167err:
168 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
169 return -1;
170}
171
172static int srst(struct mb86a16_state *state)
173{
174 if (mb86a16_write(state, MB86A16_RESET, 0x04) < 0)
175 goto err;
176
177 return 0;
178err:
179 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
180 return -EREMOTEIO;
181
182}
183
184static int afcex_data_set(struct mb86a16_state *state,
185 unsigned char AFCEX_L,
186 unsigned char AFCEX_H)
187{
188 if (mb86a16_write(state, MB86A16_AFCEXL, AFCEX_L) < 0)
189 goto err;
190 if (mb86a16_write(state, MB86A16_AFCEXH, AFCEX_H) < 0)
191 goto err;
192
193 return 0;
194err:
195 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
196
197 return -1;
198}
199
200static int afcofs_data_set(struct mb86a16_state *state,
201 unsigned char AFCEX_L,
202 unsigned char AFCEX_H)
203{
204 if (mb86a16_write(state, 0x58, AFCEX_L) < 0)
205 goto err;
206 if (mb86a16_write(state, 0x59, AFCEX_H) < 0)
207 goto err;
208
209 return 0;
210err:
211 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
212 return -EREMOTEIO;
213}
214
215static int stlp_set(struct mb86a16_state *state,
216 unsigned char STRAS,
217 unsigned char STRBS)
218{
219 if (mb86a16_write(state, MB86A16_STRFILTCOEF1, (STRBS << 3) | (STRAS)) < 0)
220 goto err;
221
222 return 0;
223err:
224 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
225 return -EREMOTEIO;
226}
227
228static int Vi_set(struct mb86a16_state *state, unsigned char ETH, unsigned char VIA)
229{
230 if (mb86a16_write(state, MB86A16_VISET2, 0x04) < 0)
231 goto err;
232 if (mb86a16_write(state, MB86A16_VISET3, 0xf5) < 0)
233 goto err;
234
235 return 0;
236err:
237 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
238 return -EREMOTEIO;
239}
240
241static int initial_set(struct mb86a16_state *state)
242{
243 if (stlp_set(state, 5, 7))
244 goto err;
245
246 udelay(100);
247 if (afcex_data_set(state, 0, 0))
248 goto err;
249
250 udelay(100);
251 if (afcofs_data_set(state, 0, 0))
252 goto err;
253
254 udelay(100);
255 if (mb86a16_write(state, MB86A16_CRLFILTCOEF1, 0x16) < 0)
256 goto err;
257 if (mb86a16_write(state, 0x2f, 0x21) < 0)
258 goto err;
259 if (mb86a16_write(state, MB86A16_VIMAG, 0x38) < 0)
260 goto err;
261 if (mb86a16_write(state, MB86A16_FAGCS1, 0x00) < 0)
262 goto err;
263 if (mb86a16_write(state, MB86A16_FAGCS2, 0x1c) < 0)
264 goto err;
265 if (mb86a16_write(state, MB86A16_FAGCS3, 0x20) < 0)
266 goto err;
267 if (mb86a16_write(state, MB86A16_FAGCS4, 0x1e) < 0)
268 goto err;
269 if (mb86a16_write(state, MB86A16_FAGCS5, 0x23) < 0)
270 goto err;
271 if (mb86a16_write(state, 0x54, 0xff) < 0)
272 goto err;
273 if (mb86a16_write(state, MB86A16_TSOUT, 0x00) < 0)
274 goto err;
275
276 return 0;
277
278err:
279 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
280 return -EREMOTEIO;
281}
282
283static int S01T_set(struct mb86a16_state *state,
284 unsigned char s1t,
285 unsigned s0t)
286{
287 if (mb86a16_write(state, 0x33, (s1t << 3) | s0t) < 0)
288 goto err;
289
290 return 0;
291err:
292 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
293 return -EREMOTEIO;
294}
295
296
297static int EN_set(struct mb86a16_state *state,
298 int cren,
299 int afcen)
300{
301 unsigned char val;
302
303 val = 0x7a | (cren << 7) | (afcen << 2);
304 if (mb86a16_write(state, 0x49, val) < 0)
305 goto err;
306
307 return 0;
308err:
309 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
310 return -EREMOTEIO;
311}
312
313static int AFCEXEN_set(struct mb86a16_state *state,
314 int afcexen,
315 int smrt)
316{
317 unsigned char AFCA ;
318
319 if (smrt > 18875)
320 AFCA = 4;
321 else if (smrt > 9375)
322 AFCA = 3;
323 else if (smrt > 2250)
324 AFCA = 2;
325 else
326 AFCA = 1;
327
328 if (mb86a16_write(state, 0x2a, 0x02 | (afcexen << 5) | (AFCA << 2)) < 0)
329 goto err;
330
331 return 0;
332
333err:
334 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
335 return -EREMOTEIO;
336}
337
338static int DAGC_data_set(struct mb86a16_state *state,
339 unsigned char DAGCA,
340 unsigned char DAGCW)
341{
342 if (mb86a16_write(state, 0x2d, (DAGCA << 3) | DAGCW) < 0)
343 goto err;
344
345 return 0;
346
347err:
348 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
349 return -EREMOTEIO;
350}
351
352static void smrt_info_get(struct mb86a16_state *state, int rate)
353{
354 if (rate >= 37501) {
355 state->deci = 0; state->csel = 0; state->rsel = 0;
356 } else if (rate >= 30001) {
357 state->deci = 0; state->csel = 0; state->rsel = 1;
358 } else if (rate >= 26251) {
359 state->deci = 0; state->csel = 1; state->rsel = 0;
360 } else if (rate >= 22501) {
361 state->deci = 0; state->csel = 1; state->rsel = 1;
362 } else if (rate >= 18751) {
363 state->deci = 1; state->csel = 0; state->rsel = 0;
364 } else if (rate >= 15001) {
365 state->deci = 1; state->csel = 0; state->rsel = 1;
366 } else if (rate >= 13126) {
367 state->deci = 1; state->csel = 1; state->rsel = 0;
368 } else if (rate >= 11251) {
369 state->deci = 1; state->csel = 1; state->rsel = 1;
370 } else if (rate >= 9376) {
371 state->deci = 2; state->csel = 0; state->rsel = 0;
372 } else if (rate >= 7501) {
373 state->deci = 2; state->csel = 0; state->rsel = 1;
374 } else if (rate >= 6563) {
375 state->deci = 2; state->csel = 1; state->rsel = 0;
376 } else if (rate >= 5626) {
377 state->deci = 2; state->csel = 1; state->rsel = 1;
378 } else if (rate >= 4688) {
379 state->deci = 3; state->csel = 0; state->rsel = 0;
380 } else if (rate >= 3751) {
381 state->deci = 3; state->csel = 0; state->rsel = 1;
382 } else if (rate >= 3282) {
383 state->deci = 3; state->csel = 1; state->rsel = 0;
384 } else if (rate >= 2814) {
385 state->deci = 3; state->csel = 1; state->rsel = 1;
386 } else if (rate >= 2344) {
387 state->deci = 4; state->csel = 0; state->rsel = 0;
388 } else if (rate >= 1876) {
389 state->deci = 4; state->csel = 0; state->rsel = 1;
390 } else if (rate >= 1641) {
391 state->deci = 4; state->csel = 1; state->rsel = 0;
392 } else if (rate >= 1407) {
393 state->deci = 4; state->csel = 1; state->rsel = 1;
394 } else if (rate >= 1172) {
395 state->deci = 5; state->csel = 0; state->rsel = 0;
396 } else if (rate >= 939) {
397 state->deci = 5; state->csel = 0; state->rsel = 1;
398 } else if (rate >= 821) {
399 state->deci = 5; state->csel = 1; state->rsel = 0;
400 } else {
401 state->deci = 5; state->csel = 1; state->rsel = 1;
402 }
403
404 if (state->csel == 0)
405 state->master_clk = 92000;
406 else
407 state->master_clk = 61333;
408
409}
410
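/*
 * Probe for a signal: read SIG1 (register 0x37) at 98%, 100% and 102%
 * of the requested symbol rate and report a hit only when the centre
 * reading exceeds both offset readings by more than 12%.
 */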
411static int signal_det(struct mb86a16_state *state,
412 int smrt,
413 unsigned char *SIG)
414{
415
416 int ret ;
417 int smrtd ;
418 int wait_sym ;
419
420 u32 wait_t;
421 unsigned char S[3] ;
422 int i ;
423
424 if (*SIG > 45) {
425 if (CNTM_set(state, 2, 1, 2) < 0) {
426 dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
427 return -1;
428 }
429 wait_sym = 40000;
430 } else {
431 if (CNTM_set(state, 3, 1, 2) < 0) {
432 dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
433 return -1;
434 }
435 wait_sym = 80000;
436 }
437 for (i = 0; i < 3; i++) {
438 if (i == 0)
439 smrtd = smrt * 98 / 100;
440 else if (i == 1)
441 smrtd = smrt;
442 else
443 smrtd = smrt * 102 / 100;
444 smrt_info_get(state, smrtd);
445 smrt_set(state, smrtd);
446 srst(state);
447 wait_t = (wait_sym + 99 * smrtd / 100) / smrtd;
448 if (wait_t == 0)
449 wait_t = 1;
450 msleep_interruptible(10);
451 if (mb86a16_read(state, 0x37, &(S[i])) != 2) {
452 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
453 return -EREMOTEIO;
454 }
455 }
456 if ((S[1] > S[0] * 112 / 100) &&
457 (S[1] > S[2] * 112 / 100)) {
458
459 ret = 1;
460 } else {
461 ret = 0;
462 }
463 *SIG = S[1];
464
465 if (CNTM_set(state, 0, 1, 2) < 0) {
466 dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
467 return -1;
468 }
469
470 return ret;
471}
472
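/*
 * Program the RF synthesizer (registers 0x21-0x25): C and F are
 * chosen from the symbol rate, B selects the 950-2150 MHz sub-band
 * from the requested frequency f (MHz), and the divider word
 * M = f * 2^R / 2 is split across the FREQ2-FREQ4 registers before
 * FREQSET is strobed.
 */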
473static int rf_val_set(struct mb86a16_state *state,
474 int f,
475 int smrt,
476 unsigned char R)
477{
478 unsigned char C, F, B;
479 int M;
480 unsigned char rf_val[5];
481 int ack = -1;
482
483 if (smrt > 37750)
484 C = 1;
485 else if (smrt > 18875)
486 C = 2;
487 else if (smrt > 5500)
488 C = 3;
489 else
490 C = 4;
491
492 if (smrt > 30500)
493 F = 3;
494 else if (smrt > 9375)
495 F = 1;
496 else if (smrt > 4625)
497 F = 0;
498 else
499 F = 2;
500
501 if (f < 1060)
502 B = 0;
503 else if (f < 1175)
504 B = 1;
505 else if (f < 1305)
506 B = 2;
507 else if (f < 1435)
508 B = 3;
509 else if (f < 1570)
510 B = 4;
511 else if (f < 1715)
512 B = 5;
513 else if (f < 1845)
514 B = 6;
515 else if (f < 1980)
516 B = 7;
517 else if (f < 2080)
518 B = 8;
519 else
520 B = 9;
521
522 M = f * (1 << R) / 2;
523
524 rf_val[0] = 0x01 | (C << 3) | (F << 1);
525 rf_val[1] = (R << 5) | ((M & 0x1f000) >> 12);
526 rf_val[2] = (M & 0x00ff0) >> 4;
527 rf_val[3] = ((M & 0x0000f) << 4) | B;
528
529 /* Frequency Set */
530 if (mb86a16_write(state, 0x21, rf_val[0]) < 0)
531 ack = 0;
532 if (mb86a16_write(state, 0x22, rf_val[1]) < 0)
533 ack = 0;
534 if (mb86a16_write(state, 0x23, rf_val[2]) < 0)
535 ack = 0;
536 if (mb86a16_write(state, 0x24, rf_val[3]) < 0)
537 ack = 0;
538 if (mb86a16_write(state, 0x25, 0x01) < 0)
539 ack = 0;
540 if (ack == 0) {
541 dprintk(verbose, MB86A16_ERROR, 1, "RF Setup - I2C transfer error");
542 return -EREMOTEIO;
543 }
544
545 return 0;
546}
547
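/*
 * AFC error in kHz: sign-extend the 12-bit AFC monitor value read
 * from registers 0x0e/0x0f and scale it by master_clk / 8192.
 */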
548static int afcerr_chk(struct mb86a16_state *state)
549{
550 unsigned char AFCM_L, AFCM_H ;
551 int AFCM ;
552 int afcm, afcerr ;
553
554 if (mb86a16_read(state, 0x0e, &AFCM_L) != 2)
555 goto err;
556 if (mb86a16_read(state, 0x0f, &AFCM_H) != 2)
557 goto err;
558
559 AFCM = (AFCM_H << 8) + AFCM_L;
560
561 if (AFCM > 2048)
562 afcm = AFCM - 4096;
563 else
564 afcm = AFCM;
565 afcerr = afcm * state->master_clk / 8192;
566
567 return afcerr;
568
569err:
570 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
571 return -EREMOTEIO;
572}
573
574static int dagcm_val_get(struct mb86a16_state *state)
575{
576 int DAGCM;
577 unsigned char DAGCM_H, DAGCM_L;
578
579 if (mb86a16_read(state, 0x45, &DAGCM_L) != 2)
580 goto err;
581 if (mb86a16_read(state, 0x46, &DAGCM_H) != 2)
582 goto err;
583
584 DAGCM = (DAGCM_H << 8) + DAGCM_L;
585
586 return DAGCM;
587
588err:
589 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
590 return -EREMOTEIO;
591}
592
593static int mb86a16_read_status(struct dvb_frontend *fe, fe_status_t *status)
594{
595 u8 stat, stat2;
596 struct mb86a16_state *state = fe->demodulator_priv;
597
598 *status = 0;
599
600 if (mb86a16_read(state, MB86A16_SIG1, &stat) != 2)
601 goto err;
602 if (mb86a16_read(state, MB86A16_SIG2, &stat2) != 2)
603 goto err;
604 if ((stat > 25) && (stat2 > 25))
605 *status |= FE_HAS_SIGNAL;
606 if ((stat > 45) && (stat2 > 45))
607 *status |= FE_HAS_CARRIER;
608
609 if (mb86a16_read(state, MB86A16_STATUS, &stat) != 2)
610 goto err;
611
612 if (stat & 0x01)
613 *status |= FE_HAS_SYNC;
614 if (stat & 0x01)
615 *status |= FE_HAS_VITERBI;
616
617 if (mb86a16_read(state, MB86A16_FRAMESYNC, &stat) != 2)
618 goto err;
619
620 if ((stat & 0x0f) && (*status & FE_HAS_VITERBI))
621 *status |= FE_HAS_LOCK;
622
623 return 0;
624
625err:
626 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
627 return -EREMOTEIO;
628}
629
630static int sync_chk(struct mb86a16_state *state,
631 unsigned char *VIRM)
632{
633 unsigned char val;
634 int sync;
635
636 if (mb86a16_read(state, 0x0d, &val) != 2)
637 goto err;
638
639 dprintk(verbose, MB86A16_INFO, 1, "Status = %02x,", val);
640 sync = val & 0x01;
641 *VIRM = (val & 0x1c) >> 2;
642
643 return sync;
644err:
645 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
646 return -EREMOTEIO;
647
648}
649
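/*
 * Estimate the residual frequency error: sum the carrier recovery
 * error (CRM, register 0x43), the AFC or AFCEX error and the offset
 * between the programmed LO (read back from FREQ2-FREQ4) and the
 * target fTP.  The result is rounded to MHz when unit == 0 and
 * returned in kHz otherwise.
 */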
650static int freqerr_chk(struct mb86a16_state *state,
651 int fTP,
652 int smrt,
653 int unit)
654{
655 unsigned char CRM, AFCML, AFCMH;
656 unsigned char temp1, temp2, temp3;
657 int crm, afcm, AFCM;
658 int crrerr, afcerr; /* kHz */
659 int frqerr; /* MHz */
660 int afcen, afcexen = 0;
661 int R, M, fOSC, fOSC_OFS;
662
663 if (mb86a16_read(state, 0x43, &CRM) != 2)
664 goto err;
665
666 if (CRM > 127)
667 crm = CRM - 256;
668 else
669 crm = CRM;
670
671 crrerr = smrt * crm / 256;
672 if (mb86a16_read(state, 0x49, &temp1) != 2)
673 goto err;
674
675 afcen = (temp1 & 0x04) >> 2;
676 if (afcen == 0) {
677 if (mb86a16_read(state, 0x2a, &temp1) != 2)
678 goto err;
679 afcexen = (temp1 & 0x20) >> 5;
680 }
681
682 if (afcen == 1) {
683 if (mb86a16_read(state, 0x0e, &AFCML) != 2)
684 goto err;
685 if (mb86a16_read(state, 0x0f, &AFCMH) != 2)
686 goto err;
687 } else if (afcexen == 1) {
688 if (mb86a16_read(state, 0x2b, &AFCML) != 2)
689 goto err;
690 if (mb86a16_read(state, 0x2c, &AFCMH) != 2)
691 goto err;
692 }
693 if ((afcen == 1) || (afcexen == 1)) {
694 smrt_info_get(state, smrt);
695 AFCM = ((AFCMH & 0x01) << 8) + AFCML;
696 if (AFCM > 255)
697 afcm = AFCM - 512;
698 else
699 afcm = AFCM;
700
701 afcerr = afcm * state->master_clk / 8192;
702 } else
703 afcerr = 0;
704
705 if (mb86a16_read(state, 0x22, &temp1) != 2)
706 goto err;
707 if (mb86a16_read(state, 0x23, &temp2) != 2)
708 goto err;
709 if (mb86a16_read(state, 0x24, &temp3) != 2)
710 goto err;
711
712 R = (temp1 & 0xe0) >> 5;
713 M = ((temp1 & 0x1f) << 12) + (temp2 << 4) + (temp3 >> 4);
714 if (R == 0)
715 fOSC = 2 * M;
716 else
717 fOSC = M;
718
719 fOSC_OFS = fOSC - fTP;
720
721 if (unit == 0) { /* MHz */
722 if (crrerr + afcerr + fOSC_OFS * 1000 >= 0)
723 frqerr = (crrerr + afcerr + fOSC_OFS * 1000 + 500) / 1000;
724 else
725 frqerr = (crrerr + afcerr + fOSC_OFS * 1000 - 500) / 1000;
726 } else { /* kHz */
727 frqerr = crrerr + afcerr + fOSC_OFS * 1000;
728 }
729
730 return frqerr;
731err:
732 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
733 return -EREMOTEIO;
734}
735
736static unsigned char vco_dev_get(struct mb86a16_state *state, int smrt)
737{
738 unsigned char R;
739
740 if (smrt > 9375)
741 R = 0;
742 else
743 R = 1;
744
745 return R;
746}
747
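/*
 * Work out the tuner setting for sweep step v: round the sweep
 * frequency to the nearest 2 MHz (R = 0) or 1 MHz (R = 1) LO step and
 * express the remainder as an AFCEX offset,
 * AFCEX = afcex_freq * 8192 / master_clk, split into its low/high
 * register halves.
 */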
748static void swp_info_get(struct mb86a16_state *state,
749 int fOSC_start,
750 int smrt,
751 int v, int R,
752 int swp_ofs,
753 int *fOSC,
754 int *afcex_freq,
755 unsigned char *AFCEX_L,
756 unsigned char *AFCEX_H)
757{
758 int AFCEX ;
759 int crnt_swp_freq ;
760
761 crnt_swp_freq = fOSC_start * 1000 + v * swp_ofs;
762
763 if (R == 0)
764 *fOSC = (crnt_swp_freq + 1000) / 2000 * 2;
765 else
766 *fOSC = (crnt_swp_freq + 500) / 1000;
767
768 if (*fOSC >= crnt_swp_freq)
769 *afcex_freq = *fOSC * 1000 - crnt_swp_freq;
770 else
771 *afcex_freq = crnt_swp_freq - *fOSC * 1000;
772
773 AFCEX = *afcex_freq * 8192 / state->master_clk;
774 *AFCEX_L = AFCEX & 0x00ff;
775 *AFCEX_H = (AFCEX & 0x0f00) >> 8;
776}
777
778
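/*
 * Pick the sweep frequency from the table of SIG1 readings: V[30 + v]
 * holds the level measured at sweep step v, and the cases below look
 * for a local maximum above SIGMIN around the current step, returning
 * -1 when no usable peak is found.
 */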
779static int swp_freq_calcuation(struct mb86a16_state *state, int i, int v, int *V, int vmax, int vmin,
780 int SIGMIN, int fOSC, int afcex_freq, int swp_ofs, unsigned char *SIG1)
781{
782 int swp_freq ;
783
784 if ((i % 2 == 1) && (v <= vmax)) {
785 /* positive v (case 1) */
786 if ((v - 1 == vmin) &&
787 (*(V + 30 + v) >= 0) &&
788 (*(V + 30 + v - 1) >= 0) &&
789 (*(V + 30 + v - 1) > *(V + 30 + v)) &&
790 (*(V + 30 + v - 1) > SIGMIN)) {
791
792 swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
793 *SIG1 = *(V + 30 + v - 1);
794 } else if ((v == vmax) &&
795 (*(V + 30 + v) >= 0) &&
796 (*(V + 30 + v - 1) >= 0) &&
797 (*(V + 30 + v) > *(V + 30 + v - 1)) &&
798 (*(V + 30 + v) > SIGMIN)) {
799 /* (case 2) */
800 swp_freq = fOSC * 1000 + afcex_freq;
801 *SIG1 = *(V + 30 + v);
802 } else if ((*(V + 30 + v) > 0) &&
803 (*(V + 30 + v - 1) > 0) &&
804 (*(V + 30 + v - 2) > 0) &&
805 (*(V + 30 + v - 3) > 0) &&
806 (*(V + 30 + v - 1) > *(V + 30 + v)) &&
807 (*(V + 30 + v - 2) > *(V + 30 + v - 3)) &&
808 ((*(V + 30 + v - 1) > SIGMIN) ||
809 (*(V + 30 + v - 2) > SIGMIN))) {
810 /* (case 3) */
811 if (*(V + 30 + v - 1) >= *(V + 30 + v - 2)) {
812 swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
813 *SIG1 = *(V + 30 + v - 1);
814 } else {
815 swp_freq = fOSC * 1000 + afcex_freq - swp_ofs * 2;
816 *SIG1 = *(V + 30 + v - 2);
817 }
818 } else if ((v == vmax) &&
819 (*(V + 30 + v) >= 0) &&
820 (*(V + 30 + v - 1) >= 0) &&
821 (*(V + 30 + v - 2) >= 0) &&
822 (*(V + 30 + v) > *(V + 30 + v - 2)) &&
823 (*(V + 30 + v - 1) > *(V + 30 + v - 2)) &&
824 ((*(V + 30 + v) > SIGMIN) ||
825 (*(V + 30 + v - 1) > SIGMIN))) {
826 /* (case 4) */
827 if (*(V + 30 + v) >= *(V + 30 + v - 1)) {
828 swp_freq = fOSC * 1000 + afcex_freq;
829 *SIG1 = *(V + 30 + v);
830 } else {
831 swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
832 *SIG1 = *(V + 30 + v - 1);
833 }
834 } else {
835 swp_freq = -1 ;
836 }
837 } else if ((i % 2 == 0) && (v >= vmin)) {
838 /* Negative v (case 1) */
839 if ((*(V + 30 + v) > 0) &&
840 (*(V + 30 + v + 1) > 0) &&
841 (*(V + 30 + v + 2) > 0) &&
842 (*(V + 30 + v + 1) > *(V + 30 + v)) &&
843 (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
844 (*(V + 30 + v + 1) > SIGMIN)) {
845
846 swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
847 *SIG1 = *(V + 30 + v + 1);
848 } else if ((v + 1 == vmax) &&
849 (*(V + 30 + v) >= 0) &&
850 (*(V + 30 + v + 1) >= 0) &&
851 (*(V + 30 + v + 1) > *(V + 30 + v)) &&
852 (*(V + 30 + v + 1) > SIGMIN)) {
853 /* (case 2) */
854 swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
855 *SIG1 = *(V + 30 + v);
856 } else if ((v == vmin) &&
857 (*(V + 30 + v) > 0) &&
858 (*(V + 30 + v + 1) > 0) &&
859 (*(V + 30 + v + 2) > 0) &&
860 (*(V + 30 + v) > *(V + 30 + v + 1)) &&
861 (*(V + 30 + v) > *(V + 30 + v + 2)) &&
862 (*(V + 30 + v) > SIGMIN)) {
863 /* (case 3) */
864 swp_freq = fOSC * 1000 + afcex_freq;
865 *SIG1 = *(V + 30 + v);
866 } else if ((*(V + 30 + v) >= 0) &&
867 (*(V + 30 + v + 1) >= 0) &&
868 (*(V + 30 + v + 2) >= 0) &&
869 (*(V + 30 + v + 3) >= 0) &&
870 (*(V + 30 + v + 1) > *(V + 30 + v)) &&
871 (*(V + 30 + v + 2) > *(V + 30 + v + 3)) &&
872 ((*(V + 30 + v + 1) > SIGMIN) ||
873 (*(V + 30 + v + 2) > SIGMIN))) {
874 /* (case 4) */
875 if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
876 swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
877 *SIG1 = *(V + 30 + v + 1);
878 } else {
879 swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
880 *SIG1 = *(V + 30 + v + 2);
881 }
882 } else if ((*(V + 30 + v) >= 0) &&
883 (*(V + 30 + v + 1) >= 0) &&
884 (*(V + 30 + v + 2) >= 0) &&
885 (*(V + 30 + v + 3) >= 0) &&
886 (*(V + 30 + v) > *(V + 30 + v + 2)) &&
887 (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
888 (*(V + 30 + v) > *(V + 30 + v + 3)) &&
889 (*(V + 30 + v + 1) > *(V + 30 + v + 3)) &&
890 ((*(V + 30 + v) > SIGMIN) ||
891 (*(V + 30 + v + 1) > SIGMIN))) {
892 /* (case 5) */
893 if (*(V + 30 + v) >= *(V + 30 + v + 1)) {
894 swp_freq = fOSC * 1000 + afcex_freq;
895 *SIG1 = *(V + 30 + v);
896 } else {
897 swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
898 *SIG1 = *(V + 30 + v + 1);
899 }
900 } else if ((v + 2 == vmin) &&
901 (*(V + 30 + v) >= 0) &&
902 (*(V + 30 + v + 1) >= 0) &&
903 (*(V + 30 + v + 2) >= 0) &&
904 (*(V + 30 + v + 1) > *(V + 30 + v)) &&
905 (*(V + 30 + v + 2) > *(V + 30 + v)) &&
906 ((*(V + 30 + v + 1) > SIGMIN) ||
907 (*(V + 30 + v + 2) > SIGMIN))) {
908 /* (case 6) */
909 if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
910 swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
911 *SIG1 = *(V + 30 + v + 1);
912 } else {
913 swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
914 *SIG1 = *(V + 30 + v + 2);
915 }
916 } else if ((vmax == 0) && (vmin == 0) && (*(V + 30 + v) > SIGMIN)) {
917 swp_freq = fOSC * 1000;
918 *SIG1 = *(V + 30 + v);
919 } else
920 swp_freq = -1;
921 } else
922 swp_freq = -1;
923
924 return swp_freq;
925}
926
927static void swp_info_get2(struct mb86a16_state *state,
928 int smrt,
929 int R,
930 int swp_freq,
931 int *afcex_freq,
932 int *fOSC,
933 unsigned char *AFCEX_L,
934 unsigned char *AFCEX_H)
935{
936 int AFCEX ;
937
938 if (R == 0)
939 *fOSC = (swp_freq + 1000) / 2000 * 2;
940 else
941 *fOSC = (swp_freq + 500) / 1000;
942
943 if (*fOSC >= swp_freq)
944 *afcex_freq = *fOSC * 1000 - swp_freq;
945 else
946 *afcex_freq = swp_freq - *fOSC * 1000;
947
948 AFCEX = *afcex_freq * 8192 / state->master_clk;
949 *AFCEX_L = AFCEX & 0x00ff;
950 *AFCEX_H = (AFCEX & 0x0f00) >> 8;
951}
952
953static void afcex_info_get(struct mb86a16_state *state,
954 int afcex_freq,
955 unsigned char *AFCEX_L,
956 unsigned char *AFCEX_H)
957{
958 int AFCEX ;
959
960 AFCEX = afcex_freq * 8192 / state->master_clk;
961 *AFCEX_L = AFCEX & 0x00ff;
962 *AFCEX_H = (AFCEX & 0x0f00) >> 8;
963}
964
965static int SEQ_set(struct mb86a16_state *state, unsigned char loop)
966{
967 /* SLOCK0 = 0 */
968 if (mb86a16_write(state, 0x32, 0x02 | (loop << 2)) < 0) {
969 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
970 return -EREMOTEIO;
971 }
972
973 return 0;
974}
975
976static int iq_vt_set(struct mb86a16_state *state, unsigned char IQINV)
977{
978 /* Viterbi Rate, IQ Settings */
979 if (mb86a16_write(state, 0x06, 0xdf | (IQINV << 5)) < 0) {
980 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
981 return -EREMOTEIO;
982 }
983
984 return 0;
985}
986
987static int FEC_srst(struct mb86a16_state *state)
988{
989 if (mb86a16_write(state, MB86A16_RESET, 0x02) < 0) {
990 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
991 return -EREMOTEIO;
992 }
993
994 return 0;
995}
996
997static int S2T_set(struct mb86a16_state *state, unsigned char S2T)
998{
999 if (mb86a16_write(state, 0x34, 0x70 | S2T) < 0) {
1000 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1001 return -EREMOTEIO;
1002 }
1003
1004 return 0;
1005}
1006
1007static int S45T_set(struct mb86a16_state *state, unsigned char S4T, unsigned char S5T)
1008{
1009 if (mb86a16_write(state, 0x35, 0x00 | (S5T << 4) | S4T) < 0) {
1010 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1011 return -EREMOTEIO;
1012 }
1013
1014 return 0;
1015}
1016
1017
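/*
 * Acquisition sequence: sweep the LO in swp_ofs = srate / 4 steps
 * around the requested frequency (bounded to 950-2150 MHz and to
 * fcp + srate / 4 on either side of the target), look for a signal at
 * each step, then refine the hit using the AFC error and a DAGC-based
 * delta before programming the Viterbi/sync stage.  Up to three
 * passes are attempted before giving up.
 */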
1018static int mb86a16_set_fe(struct mb86a16_state *state)
1019{
1020 u8 agcval, cnmval;
1021
1022 int i, j;
1023 int fOSC = 0;
1024 int fOSC_start = 0;
1025 int wait_t;
1026 int fcp;
1027 int swp_ofs;
1028 int V[60];
1029 u8 SIG1MIN;
1030
1031 unsigned char CREN, AFCEN, AFCEXEN;
1032 unsigned char SIG1;
1033 unsigned char TIMINT1, TIMINT2, TIMEXT;
1034 unsigned char S0T, S1T;
1035 unsigned char S2T;
1036/* unsigned char S2T, S3T; */
1037 unsigned char S4T, S5T;
1038 unsigned char AFCEX_L, AFCEX_H;
1039 unsigned char R;
1040 unsigned char VIRM;
1041 unsigned char ETH, VIA;
1042 unsigned char junk;
1043
1044 int loop;
1045 int ftemp;
1046 int v, vmax, vmin;
1047 int vmax_his, vmin_his;
1048 int swp_freq, prev_swp_freq[20];
1049 int prev_freq_num;
1050 int signal_dupl;
1051 int afcex_freq;
1052 int signal;
1053 int afcerr;
1054 int temp_freq, delta_freq;
1055 int dagcm[4];
1056 int smrt_d;
1057/* int freq_err; */
1058 int n;
1059 int ret = -1;
1060 int sync;
1061
1062	dprintk(verbose, MB86A16_INFO, 1, "freq=%d MHz, symbrt=%d Ksps", state->frequency, state->srate);
1063
1064 fcp = 3000;
1065 swp_ofs = state->srate / 4;
1066
1067 for (i = 0; i < 60; i++)
1068 V[i] = -1;
1069
1070 for (i = 0; i < 20; i++)
1071 prev_swp_freq[i] = 0;
1072
1073 SIG1MIN = 25;
1074
1075 for (n = 0; ((n < 3) && (ret == -1)); n++) {
1076 SEQ_set(state, 0);
1077 iq_vt_set(state, 0);
1078
1079 CREN = 0;
1080 AFCEN = 0;
1081 AFCEXEN = 1;
1082 TIMINT1 = 0;
1083 TIMINT2 = 1;
1084 TIMEXT = 2;
1085 S1T = 0;
1086 S0T = 0;
1087
1088 if (initial_set(state) < 0) {
1089 dprintk(verbose, MB86A16_ERROR, 1, "initial set failed");
1090 return -1;
1091 }
1092 if (DAGC_data_set(state, 3, 2) < 0) {
1093 dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
1094 return -1;
1095 }
1096 if (EN_set(state, CREN, AFCEN) < 0) {
1097 dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
1098 return -1; /* (0, 0) */
1099 }
1100 if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
1101 dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
1102 return -1; /* (1, smrt) = (1, symbolrate) */
1103 }
1104 if (CNTM_set(state, TIMINT1, TIMINT2, TIMEXT) < 0) {
1105 dprintk(verbose, MB86A16_ERROR, 1, "CNTM set error");
1106 return -1; /* (0, 1, 2) */
1107 }
1108 if (S01T_set(state, S1T, S0T) < 0) {
1109 dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
1110 return -1; /* (0, 0) */
1111 }
1112 smrt_info_get(state, state->srate);
1113 if (smrt_set(state, state->srate) < 0) {
1114			dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
1115 return -1;
1116 }
1117
1118 R = vco_dev_get(state, state->srate);
1119 if (R == 1)
1120 fOSC_start = state->frequency;
1121
1122 else if (R == 0) {
1123 if (state->frequency % 2 == 0) {
1124 fOSC_start = state->frequency;
1125 } else {
1126 fOSC_start = state->frequency + 1;
1127 if (fOSC_start > 2150)
1128 fOSC_start = state->frequency - 1;
1129 }
1130 }
1131 loop = 1;
1132 ftemp = fOSC_start * 1000;
1133 vmax = 0 ;
1134 while (loop == 1) {
1135 ftemp = ftemp + swp_ofs;
1136 vmax++;
1137
1138 /* Upper bound */
1139 if (ftemp > 2150000) {
1140 loop = 0;
1141 vmax--;
1142 } else {
1143 if ((ftemp == 2150000) ||
1144 (ftemp - state->frequency * 1000 >= fcp + state->srate / 4))
1145 loop = 0;
1146 }
1147 }
1148
1149 loop = 1;
1150 ftemp = fOSC_start * 1000;
1151 vmin = 0 ;
1152 while (loop == 1) {
1153 ftemp = ftemp - swp_ofs;
1154 vmin--;
1155
1156 /* Lower bound */
1157 if (ftemp < 950000) {
1158 loop = 0;
1159 vmin++;
1160 } else {
1161 if ((ftemp == 950000) ||
1162 (state->frequency * 1000 - ftemp >= fcp + state->srate / 4))
1163 loop = 0;
1164 }
1165 }
1166
1167 wait_t = (8000 + state->srate / 2) / state->srate;
1168 if (wait_t == 0)
1169 wait_t = 1;
1170
1171 i = 0;
1172 j = 0;
1173 prev_freq_num = 0;
1174 loop = 1;
1175 signal = 0;
1176 vmax_his = 0;
1177 vmin_his = 0;
1178 v = 0;
1179
1180 while (loop == 1) {
1181 swp_info_get(state, fOSC_start, state->srate,
1182 v, R, swp_ofs, &fOSC,
1183 &afcex_freq, &AFCEX_L, &AFCEX_H);
1184
1185 udelay(100);
1186 if (rf_val_set(state, fOSC, state->srate, R) < 0) {
1187 dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
1188 return -1;
1189 }
1190 udelay(100);
1191 if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
1192 dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
1193 return -1;
1194 }
1195 if (srst(state) < 0) {
1196 dprintk(verbose, MB86A16_ERROR, 1, "srst error");
1197 return -1;
1198 }
1199 msleep_interruptible(wait_t);
1200
1201 if (mb86a16_read(state, 0x37, &SIG1) != 2) {
1202 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1203 return -1;
1204 }
1205 V[30 + v] = SIG1 ;
1206 swp_freq = swp_freq_calcuation(state, i, v, V, vmax, vmin,
1207 SIG1MIN, fOSC, afcex_freq,
1208 swp_ofs, &SIG1); /* changed */
1209
1210 signal_dupl = 0;
1211 for (j = 0; j < prev_freq_num; j++) {
1212 if ((ABS(prev_swp_freq[j] - swp_freq)) < (swp_ofs * 3 / 2)) {
1213 signal_dupl = 1;
1214 dprintk(verbose, MB86A16_INFO, 1, "Probably Duplicate Signal, j = %d", j);
1215 }
1216 }
1217 if ((signal_dupl == 0) && (swp_freq > 0) && (ABS(swp_freq - state->frequency * 1000) < fcp + state->srate / 6)) {
1218 dprintk(verbose, MB86A16_DEBUG, 1, "------ Signal detect ------ [swp_freq=[%07d, srate=%05d]]", swp_freq, state->srate);
1219 prev_swp_freq[prev_freq_num] = swp_freq;
1220 prev_freq_num++;
1221 swp_info_get2(state, state->srate, R, swp_freq,
1222 &afcex_freq, &fOSC,
1223 &AFCEX_L, &AFCEX_H);
1224
1225 if (rf_val_set(state, fOSC, state->srate, R) < 0) {
1226 dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
1227 return -1;
1228 }
1229 if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
1230 dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
1231 return -1;
1232 }
1233 signal = signal_det(state, state->srate, &SIG1);
1234 if (signal == 1) {
1235 dprintk(verbose, MB86A16_ERROR, 1, "***** Signal Found *****");
1236 loop = 0;
1237 } else {
1238 dprintk(verbose, MB86A16_ERROR, 1, "!!!!! No signal !!!!!, try again...");
1239 smrt_info_get(state, state->srate);
1240 if (smrt_set(state, state->srate) < 0) {
1241 dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
1242 return -1;
1243 }
1244 }
1245 }
1246 if (v > vmax)
1247 vmax_his = 1 ;
1248 if (v < vmin)
1249 vmin_his = 1 ;
1250 i++;
1251
1252 if ((i % 2 == 1) && (vmax_his == 1))
1253 i++;
1254 if ((i % 2 == 0) && (vmin_his == 1))
1255 i++;
1256
1257 if (i % 2 == 1)
1258 v = (i + 1) / 2;
1259 else
1260 v = -i / 2;
1261
1262 if ((vmax_his == 1) && (vmin_his == 1))
1263 loop = 0 ;
1264 }
1265
1266 if (signal == 1) {
1267 dprintk(verbose, MB86A16_INFO, 1, " Start Freq Error Check");
1268 S1T = 7 ;
1269 S0T = 1 ;
1270 CREN = 0 ;
1271 AFCEN = 1 ;
1272 AFCEXEN = 0 ;
1273
1274 if (S01T_set(state, S1T, S0T) < 0) {
1275 dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
1276 return -1;
1277 }
1278 smrt_info_get(state, state->srate);
1279 if (smrt_set(state, state->srate) < 0) {
1280 dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
1281 return -1;
1282 }
1283 if (EN_set(state, CREN, AFCEN) < 0) {
1284 dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
1285 return -1;
1286 }
1287 if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
1288 dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
1289 return -1;
1290 }
1291 afcex_info_get(state, afcex_freq, &AFCEX_L, &AFCEX_H);
1292 if (afcofs_data_set(state, AFCEX_L, AFCEX_H) < 0) {
1293 dprintk(verbose, MB86A16_ERROR, 1, "AFCOFS data set error");
1294 return -1;
1295 }
1296 if (srst(state) < 0) {
1297 dprintk(verbose, MB86A16_ERROR, 1, "srst error");
1298 return -1;
1299 }
1300 /* delay 4~200 */
1301 wait_t = 200000 / state->master_clk + 200000 / state->srate;
1302 msleep(wait_t);
1303 afcerr = afcerr_chk(state);
1304 if (afcerr == -1)
1305 return -1;
1306
1307 swp_freq = fOSC * 1000 + afcerr ;
1308 AFCEXEN = 1 ;
1309 if (state->srate >= 1500)
1310 smrt_d = state->srate / 3;
1311 else
1312 smrt_d = state->srate / 2;
1313 smrt_info_get(state, smrt_d);
1314 if (smrt_set(state, smrt_d) < 0) {
1315 dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
1316 return -1;
1317 }
1318 if (AFCEXEN_set(state, AFCEXEN, smrt_d) < 0) {
1319 dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
1320 return -1;
1321 }
1322 R = vco_dev_get(state, smrt_d);
1323 if (DAGC_data_set(state, 2, 0) < 0) {
1324 dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
1325 return -1;
1326 }
1327 for (i = 0; i < 3; i++) {
1328 temp_freq = swp_freq + (i - 1) * state->srate / 8;
1329 swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
1330 if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
1331 dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
1332 return -1;
1333 }
1334 if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
1335 dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
1336 return -1;
1337 }
1338 wait_t = 200000 / state->master_clk + 40000 / smrt_d;
1339 msleep(wait_t);
1340 dagcm[i] = dagcm_val_get(state);
1341 }
1342 if ((dagcm[0] > dagcm[1]) &&
1343 (dagcm[0] > dagcm[2]) &&
1344 (dagcm[0] - dagcm[1] > 2 * (dagcm[2] - dagcm[1]))) {
1345
1346 temp_freq = swp_freq - 2 * state->srate / 8;
1347 swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
1348 if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
1349 dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
1350 return -1;
1351 }
1352 if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
1353 dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
1354 return -1;
1355 }
1356 wait_t = 200000 / state->master_clk + 40000 / smrt_d;
1357 msleep(wait_t);
1358 dagcm[3] = dagcm_val_get(state);
1359 if (dagcm[3] > dagcm[1])
1360 delta_freq = (dagcm[2] - dagcm[0] + dagcm[1] - dagcm[3]) * state->srate / 300;
1361 else
1362 delta_freq = 0;
1363 } else if ((dagcm[2] > dagcm[1]) &&
1364 (dagcm[2] > dagcm[0]) &&
1365 (dagcm[2] - dagcm[1] > 2 * (dagcm[0] - dagcm[1]))) {
1366
1367 temp_freq = swp_freq + 2 * state->srate / 8;
1368 swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
1369 if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
1370 dprintk(verbose, MB86A16_ERROR, 1, "rf val set");
1371 return -1;
1372 }
1373 if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
1374 dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
1375 return -1;
1376 }
1377 wait_t = 200000 / state->master_clk + 40000 / smrt_d;
1378 msleep(wait_t);
1379 dagcm[3] = dagcm_val_get(state);
1380 if (dagcm[3] > dagcm[1])
1381 delta_freq = (dagcm[2] - dagcm[0] + dagcm[3] - dagcm[1]) * state->srate / 300;
1382 else
1383 delta_freq = 0 ;
1384
1385 } else {
1386 delta_freq = 0 ;
1387 }
1388 dprintk(verbose, MB86A16_INFO, 1, "SWEEP Frequency = %d", swp_freq);
1389 swp_freq += delta_freq;
1390 dprintk(verbose, MB86A16_INFO, 1, "Adjusting .., DELTA Freq = %d, SWEEP Freq=%d", delta_freq, swp_freq);
1391 if (ABS(state->frequency * 1000 - swp_freq) > 3800) {
1392 dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL !");
1393 } else {
1394
1395 S1T = 0;
1396 S0T = 3;
1397 CREN = 1;
1398 AFCEN = 0;
1399 AFCEXEN = 1;
1400
1401 if (S01T_set(state, S1T, S0T) < 0) {
1402 dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
1403 return -1;
1404 }
1405 if (DAGC_data_set(state, 0, 0) < 0) {
1406 dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
1407 return -1;
1408 }
1409 R = vco_dev_get(state, state->srate);
1410 smrt_info_get(state, state->srate);
1411 if (smrt_set(state, state->srate) < 0) {
1412 dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
1413 return -1;
1414 }
1415 if (EN_set(state, CREN, AFCEN) < 0) {
1416 dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
1417 return -1;
1418 }
1419 if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
1420 dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
1421 return -1;
1422 }
1423 swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
1424 if (rf_val_set(state, fOSC, state->srate, R) < 0) {
1425 dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
1426 return -1;
1427 }
1428 if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
1429 dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
1430 return -1;
1431 }
1432 if (srst(state) < 0) {
1433 dprintk(verbose, MB86A16_ERROR, 1, "srst error");
1434 return -1;
1435 }
1436 wait_t = 7 + (10000 + state->srate / 2) / state->srate;
1437 if (wait_t == 0)
1438 wait_t = 1;
1439 msleep_interruptible(wait_t);
1440 if (mb86a16_read(state, 0x37, &SIG1) != 2) {
1441 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1442 return -EREMOTEIO;
1443 }
1444
1445 if (SIG1 > 110) {
1446 S2T = 4; S4T = 1; S5T = 6; ETH = 4; VIA = 6;
1447 wait_t = 7 + (917504 + state->srate / 2) / state->srate;
1448 } else if (SIG1 > 105) {
1449 S2T = 4; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
1450 wait_t = 7 + (1048576 + state->srate / 2) / state->srate;
1451 } else if (SIG1 > 85) {
1452 S2T = 5; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
1453 wait_t = 7 + (1310720 + state->srate / 2) / state->srate;
1454 } else if (SIG1 > 65) {
1455 S2T = 6; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
1456 wait_t = 7 + (1572864 + state->srate / 2) / state->srate;
1457 } else {
1458 S2T = 7; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
1459 wait_t = 7 + (2097152 + state->srate / 2) / state->srate;
1460 }
1461 wait_t *= 2; /* FOS */
1462 S2T_set(state, S2T);
1463 S45T_set(state, S4T, S5T);
1464 Vi_set(state, ETH, VIA);
1465 srst(state);
1466 msleep_interruptible(wait_t);
1467 sync = sync_chk(state, &VIRM);
1468 dprintk(verbose, MB86A16_INFO, 1, "-------- Viterbi=[%d] SYNC=[%d] ---------", VIRM, sync);
1469 if (VIRM) {
1470 if (VIRM == 4) {
1471 /* 5/6 */
1472 if (SIG1 > 110)
1473 wait_t = (786432 + state->srate / 2) / state->srate;
1474 else
1475 wait_t = (1572864 + state->srate / 2) / state->srate;
1476 if (state->srate < 5000)
1477 /* FIXME ! , should be a long wait ! */
1478 msleep_interruptible(wait_t);
1479 else
1480 msleep_interruptible(wait_t);
1481
1482 if (sync_chk(state, &junk) == 0) {
1483 iq_vt_set(state, 1);
1484 FEC_srst(state);
1485 }
1486 }
1487 /* 1/2, 2/3, 3/4, 7/8 */
1488 if (SIG1 > 110)
1489 wait_t = (786432 + state->srate / 2) / state->srate;
1490 else
1491 wait_t = (1572864 + state->srate / 2) / state->srate;
1492 msleep_interruptible(wait_t);
1493 SEQ_set(state, 1);
1494 } else {
1495 dprintk(verbose, MB86A16_INFO, 1, "NO -- SYNC");
1496 SEQ_set(state, 1);
1497 ret = -1;
1498 }
1499 }
1500 } else {
1501 dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL");
1502 ret = -1;
1503 }
1504
1505 sync = sync_chk(state, &junk);
1506 if (sync) {
1507 dprintk(verbose, MB86A16_INFO, 1, "******* SYNC *******");
1508 freqerr_chk(state, state->frequency, state->srate, 1);
1509 ret = 0;
1510 break;
1511 }
1512 }
1513
1514 mb86a16_read(state, 0x15, &agcval);
1515 mb86a16_read(state, 0x26, &cnmval);
1516 dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval);
1517
1518 return ret;
1519}
1520
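/*
 * DiSEqC: the 4- or 5-byte message is written to consecutive code
 * registers starting at 0x18 (DCC3), then DCC1 is loaded with DISTA,
 * TBEN and the byte count (msg_len + 0x90) and DCCOUT is enabled to
 * start transmission.
 */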
1521static int mb86a16_send_diseqc_msg(struct dvb_frontend *fe,
1522 struct dvb_diseqc_master_cmd *cmd)
1523{
1524 struct mb86a16_state *state = fe->demodulator_priv;
1525 int i;
1526 u8 regs;
1527
1528 if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
1529 goto err;
1530 if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
1531 goto err;
1532 if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
1533 goto err;
1534
1535 regs = 0x18;
1536
1537 if (cmd->msg_len > 5 || cmd->msg_len < 4)
1538 return -EINVAL;
1539
1540 for (i = 0; i < cmd->msg_len; i++) {
1541 if (mb86a16_write(state, regs, cmd->msg[i]) < 0)
1542 goto err;
1543
1544 regs++;
1545 }
1546 i += 0x90;
1547
1548 msleep_interruptible(10);
1549
1550 if (mb86a16_write(state, MB86A16_DCC1, i) < 0)
1551 goto err;
1552 if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
1553 goto err;
1554
1555 return 0;
1556
1557err:
1558 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1559 return -EREMOTEIO;
1560}
1561
1562static int mb86a16_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
1563{
1564 struct mb86a16_state *state = fe->demodulator_priv;
1565
1566 switch (burst) {
1567 case SEC_MINI_A:
1568 if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
1569 MB86A16_DCC1_TBEN |
1570 MB86A16_DCC1_TBO) < 0)
1571 goto err;
1572 if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
1573 goto err;
1574 break;
1575 case SEC_MINI_B:
1576 if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
1577 MB86A16_DCC1_TBEN) < 0)
1578 goto err;
1579 if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
1580 goto err;
1581 break;
1582 }
1583
1584 return 0;
1585err:
1586 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1587 return -EREMOTEIO;
1588}
1589
1590static int mb86a16_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
1591{
1592 struct mb86a16_state *state = fe->demodulator_priv;
1593
1594 switch (tone) {
1595 case SEC_TONE_ON:
1596 if (mb86a16_write(state, MB86A16_TONEOUT2, 0x00) < 0)
1597 goto err;
1598 if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
1599 MB86A16_DCC1_CTOE) < 0)
1600
1601 goto err;
1602 if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
1603 goto err;
1604 break;
1605 case SEC_TONE_OFF:
1606 if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
1607 goto err;
1608 if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
1609 goto err;
1610 if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
1611 goto err;
1612 break;
1613 default:
1614 return -EINVAL;
1615 }
1616 return 0;
1617
1618err:
1619 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1620 return -EREMOTEIO;
1621}
1622
1623static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe,
1624 struct dvb_frontend_parameters *p)
1625{
1626 struct mb86a16_state *state = fe->demodulator_priv;
1627
1628 state->frequency = p->frequency / 1000;
1629 state->srate = p->u.qpsk.symbol_rate / 1000;
1630
1631 if (!mb86a16_set_fe(state)) {
1632		dprintk(verbose, MB86A16_ERROR, 1, "Successfully acquired LOCK");
1633 return DVBFE_ALGO_SEARCH_SUCCESS;
1634 }
1635
1636 dprintk(verbose, MB86A16_ERROR, 1, "Lock acquisition failed!");
1637 return DVBFE_ALGO_SEARCH_FAILED;
1638}
1639
1640static void mb86a16_release(struct dvb_frontend *fe)
1641{
1642 struct mb86a16_state *state = fe->demodulator_priv;
1643 kfree(state);
1644}
1645
1646static int mb86a16_init(struct dvb_frontend *fe)
1647{
1648 return 0;
1649}
1650
1651static int mb86a16_sleep(struct dvb_frontend *fe)
1652{
1653 return 0;
1654}
1655
1656static int mb86a16_read_ber(struct dvb_frontend *fe, u32 *ber)
1657{
1658 u8 ber_mon, ber_tab, ber_lsb, ber_mid, ber_msb, ber_tim, ber_rst;
1659 u32 timer;
1660
1661 struct mb86a16_state *state = fe->demodulator_priv;
1662
1663 *ber = 0;
1664 if (mb86a16_read(state, MB86A16_BERMON, &ber_mon) != 2)
1665 goto err;
1666 if (mb86a16_read(state, MB86A16_BERTAB, &ber_tab) != 2)
1667 goto err;
1668 if (mb86a16_read(state, MB86A16_BERLSB, &ber_lsb) != 2)
1669 goto err;
1670 if (mb86a16_read(state, MB86A16_BERMID, &ber_mid) != 2)
1671 goto err;
1672 if (mb86a16_read(state, MB86A16_BERMSB, &ber_msb) != 2)
1673 goto err;
1674 /* BER monitor invalid when BER_EN = 0 */
1675 if (ber_mon & 0x04) {
1676 /* coarse, fast calculation */
1677 *ber = ber_tab & 0x1f;
1678 dprintk(verbose, MB86A16_DEBUG, 1, "BER coarse=[0x%02x]", *ber);
1679 if (ber_mon & 0x01) {
1680 /*
1681 * BER_SEL = 1, The monitored BER is the estimated
1682 * value with a Reed-Solomon decoder error amount at
1683 * the deinterleaver output.
1684 * monitored BER is expressed as a 20 bit output in total
1685 */
1686 ber_rst = ber_mon >> 3;
1687 *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
1688 if (ber_rst == 0)
1689 timer = 12500000;
1690 if (ber_rst == 1)
1691 timer = 25000000;
1692 if (ber_rst == 2)
1693 timer = 50000000;
1694 if (ber_rst == 3)
1695 timer = 100000000;
1696
1697 *ber /= timer;
1698 dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
1699 } else {
1700 /*
1701 * BER_SEL = 0, The monitored BER is the estimated
1702 * value with a Viterbi decoder error amount at the
1703 * QPSK demodulator output.
1704 * monitored BER is expressed as a 24 bit output in total
1705 */
1706 ber_tim = ber_mon >> 1;
1707 *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
1708 if (ber_tim == 0)
1709 timer = 16;
1710 if (ber_tim == 1)
1711 timer = 24;
1712
1713			*ber /= 1 << timer; /* divide by 2^timer bits; plain '^' is XOR in C */
1714 dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
1715 }
1716 }
1717 return 0;
1718err:
1719 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1720 return -EREMOTEIO;
1721}
1722
1723static int mb86a16_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
1724{
1725 u8 agcm = 0;
1726 struct mb86a16_state *state = fe->demodulator_priv;
1727
1728 *strength = 0;
1729 if (mb86a16_read(state, MB86A16_AGCM, &agcm) != 2) {
1730 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1731 return -EREMOTEIO;
1732 }
1733
1734 *strength = ((0xff - agcm) * 100) / 256;
1735 dprintk(verbose, MB86A16_DEBUG, 1, "Signal strength=[%d %%]", (u8) *strength);
1736 *strength = (0xffff - 0xff) + agcm;
1737
1738 return 0;
1739}
1740
1741struct cnr {
1742 u8 cn_reg;
1743 u8 cn_val;
1744};
1745
1746static const struct cnr cnr_tab[] = {
1747 { 35, 2 },
1748 { 40, 3 },
1749 { 50, 4 },
1750 { 60, 5 },
1751 { 70, 6 },
1752 { 80, 7 },
1753 { 92, 8 },
1754 { 103, 9 },
1755 { 115, 10 },
1756 { 138, 12 },
1757 { 162, 15 },
1758 { 180, 18 },
1759 { 185, 19 },
1760 { 189, 20 },
1761 { 195, 22 },
1762 { 199, 24 },
1763 { 201, 25 },
1764 { 202, 26 },
1765 { 203, 27 },
1766 { 205, 28 },
1767 { 208, 30 }
1768};
1769
1770static int mb86a16_read_snr(struct dvb_frontend *fe, u16 *snr)
1771{
1772 struct mb86a16_state *state = fe->demodulator_priv;
1773 int i = 0;
1774 int low_tide = 2, high_tide = 30, q_level;
1775 u8 cn;
1776
1777 *snr = 0;
1778 if (mb86a16_read(state, 0x26, &cn) != 2) {
1779 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1780 return -EREMOTEIO;
1781 }
1782
1783 for (i = 0; i < ARRAY_SIZE(cnr_tab); i++) {
1784 if (cn < cnr_tab[i].cn_reg) {
1785 *snr = cnr_tab[i].cn_val;
1786 break;
1787 }
1788 }
1789 q_level = (*snr * 100) / (high_tide - low_tide);
1790 dprintk(verbose, MB86A16_ERROR, 1, "SNR (Quality) = [%d dB], Level=%d %%", *snr, q_level);
1791 *snr = (0xffff - 0xff) + *snr;
1792
1793 return 0;
1794}
1795
1796static int mb86a16_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
1797{
1798 u8 dist;
1799 struct mb86a16_state *state = fe->demodulator_priv;
1800
1801 if (mb86a16_read(state, MB86A16_DISTMON, &dist) != 2) {
1802 dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
1803 return -EREMOTEIO;
1804 }
1805 *ucblocks = dist;
1806
1807 return 0;
1808}
1809
1810static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
1811{
1812 return DVBFE_ALGO_CUSTOM;
1813}
1814
1815static struct dvb_frontend_ops mb86a16_ops = {
1816 .info = {
1817 .name = "Fujitsu MB86A16 DVB-S",
1818 .type = FE_QPSK,
1819 .frequency_min = 950000,
1820 .frequency_max = 2150000,
1821 .frequency_stepsize = 3000,
1822 .frequency_tolerance = 0,
1823 .symbol_rate_min = 1000000,
1824 .symbol_rate_max = 45000000,
1825 .symbol_rate_tolerance = 500,
1826 .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
1827 FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
1828 FE_CAN_FEC_7_8 | FE_CAN_QPSK |
1829 FE_CAN_FEC_AUTO
1830 },
1831 .release = mb86a16_release,
1832
1833 .get_frontend_algo = mb86a16_frontend_algo,
1834 .search = mb86a16_search,
1835 .read_status = mb86a16_read_status,
1836 .init = mb86a16_init,
1837 .sleep = mb86a16_sleep,
1839
1840 .read_ber = mb86a16_read_ber,
1841 .read_signal_strength = mb86a16_read_signal_strength,
1842 .read_snr = mb86a16_read_snr,
1843 .read_ucblocks = mb86a16_read_ucblocks,
1844
1845 .diseqc_send_master_cmd = mb86a16_send_diseqc_msg,
1846 .diseqc_send_burst = mb86a16_send_diseqc_burst,
1847 .set_tone = mb86a16_set_tone,
1848};
1849
1850struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
1851 struct i2c_adapter *i2c_adap)
1852{
1853 u8 dev_id = 0;
1854 struct mb86a16_state *state = NULL;
1855
1856 state = kmalloc(sizeof(struct mb86a16_state), GFP_KERNEL);
1857 if (state == NULL)
1858 goto error;
1859
1860 state->config = config;
1861 state->i2c_adap = i2c_adap;
1862
1863 mb86a16_read(state, 0x7f, &dev_id);
1864 if (dev_id != 0xfe)
1865 goto error;
1866
1867 memcpy(&state->frontend.ops, &mb86a16_ops, sizeof(struct dvb_frontend_ops));
1868 state->frontend.demodulator_priv = state;
1869 state->frontend.ops.set_voltage = state->config->set_voltage;
1870
1871 return &state->frontend;
1872error:
1873 kfree(state);
1874 return NULL;
1875}
1876EXPORT_SYMBOL(mb86a16_attach);
1877MODULE_LICENSE("GPL");
1878MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb/frontends/mb86a16.h b/drivers/media/dvb/frontends/mb86a16.h
new file mode 100644
index 000000000000..6ea8c376394f
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16.h
@@ -0,0 +1,52 @@
1/*
2 Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MB86A16_H
22#define __MB86A16_H
23
24#include <linux/dvb/frontend.h>
25#include "dvb_frontend.h"
26
27
28struct mb86a16_config {
29 u8 demod_address;
30
31 int (*set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
32};
33
34
35
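/*
 * Typical usage from a bridge/card driver (illustrative only; the
 * demod address and voltage callback below are placeholders, not
 * taken from a real board):
 *
 *	static struct mb86a16_config demod_cfg = {
 *		.demod_address	= 0x08,
 *		.set_voltage	= my_set_voltage,
 *	};
 *
 *	fe = mb86a16_attach(&demod_cfg, i2c_adap);
 *	if (!fe)
 *		return -ENODEV;
 */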
36#if defined(CONFIG_DVB_MB86A16) || (defined(CONFIG_DVB_MB86A16_MODULE) && defined(MODULE))
37
38extern struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
39 struct i2c_adapter *i2c_adap);
40
41#else
42
43static inline struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
44 struct i2c_adapter *i2c_adap)
45{
46 printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
47 return NULL;
48}
49
50#endif /* CONFIG_DVB_MB86A16 */
51
52#endif /* __MB86A16_H */
diff --git a/drivers/media/dvb/frontends/mb86a16_priv.h b/drivers/media/dvb/frontends/mb86a16_priv.h
new file mode 100644
index 000000000000..360a35acfe84
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16_priv.h
@@ -0,0 +1,151 @@
1/*
2 Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MB86A16_PRIV_H
22#define __MB86A16_PRIV_H
23
24#define MB86A16_TSOUT 0x00
25#define MB86A16_TSOUT_HIZSEL (0x01 << 5)
26#define MB86A16_TSOUT_HIZCNTI (0x01 << 4)
27#define MB86A16_TSOUT_MODE (0x01 << 3)
28#define MB86A16_TSOUT_ORDER (0x01 << 2)
29#define MB86A16_TSOUT_ERROR (0x01 << 1)
30#define Mb86A16_TSOUT_EDGE (0x01 << 0)
31
32#define MB86A16_FEC 0x01
33#define MB86A16_FEC_FSYNC (0x01 << 5)
34#define MB86A16_FEC_PCKB8 (0x01 << 4)
35#define MB86A16_FEC_DVDS (0x01 << 3)
36#define MB86A16_FEC_EREN (0x01 << 2)
37#define Mb86A16_FEC_RSEN (0x01 << 1)
38#define MB86A16_FEC_DIEN (0x01 << 0)
39
40#define MB86A16_AGC 0x02
41#define MB86A16_AGC_AGMD (0x01 << 6)
42#define MB86A16_AGC_AGCW (0x0f << 2)
43#define MB86A16_AGC_AGCP (0x01 << 1)
44#define MB86A16_AGC_AGCR (0x01 << 0)
45
46#define MB86A16_SRATE1 0x03
47#define MB86A16_SRATE1_DECI (0x07 << 2)
48#define MB86A16_SRATE1_CSEL (0x01 << 1)
49#define MB86A16_SRATE1_RSEL (0x01 << 0)
50
51#define MB86A16_SRATE2 0x04
52#define MB86A16_SRATE2_STOFSL (0xff << 0)
53
54#define MB86A16_SRATE3 0x05
55#define MB86A16_SRATE2_STOFSH (0xff << 0)
56
57#define MB86A16_VITERBI 0x06
58#define MB86A16_FRAMESYNC 0x07
59#define MB86A16_CRLFILTCOEF1 0x08
60#define MB86A16_CRLFILTCOEF2 0x09
61#define MB86A16_STRFILTCOEF1 0x0a
62#define MB86A16_STRFILTCOEF2 0x0b
63#define MB86A16_RESET 0x0c
64#define MB86A16_STATUS 0x0d
65#define MB86A16_AFCML 0x0e
66#define MB86A16_AFCMH 0x0f
67#define MB86A16_BERMON 0x10
68#define MB86A16_BERTAB 0x11
69#define MB86A16_BERLSB 0x12
70#define MB86A16_BERMID 0x13
71#define MB86A16_BERMSB 0x14
72#define MB86A16_AGCM 0x15
73
74#define MB86A16_DCC1 0x16
75#define MB86A16_DCC1_DISTA (0x01 << 7)
76#define MB86A16_DCC1_PRTY (0x01 << 6)
77#define MB86A16_DCC1_CTOE (0x01 << 5)
78#define MB86A16_DCC1_TBEN (0x01 << 4)
79#define MB86A16_DCC1_TBO (0x01 << 3)
80#define MB86A16_DCC1_NUM (0x07 << 0)
81
82#define MB86A16_DCC2 0x17
83#define MB86A16_DCC2_DCBST (0x01 << 0)
84
85#define MB86A16_DCC3 0x18
86#define MB86A16_DCC3_CODE0 (0xff << 0)
87
88#define MB86A16_DCC4 0x19
89#define MB86A16_DCC4_CODE1 (0xff << 0)
90
91#define MB86A16_DCC5 0x1a
92#define MB86A16_DCC5_CODE2 (0xff << 0)
93
94#define MB86A16_DCC6 0x1b
95#define MB86A16_DCC6_CODE3 (0xff << 0)
96
97#define MB86A16_DCC7 0x1c
98#define MB86A16_DCC7_CODE4 (0xff << 0)
99
100#define MB86A16_DCC8 0x1d
101#define MB86A16_DCC8_CODE5 (0xff << 0)
102
103#define MB86A16_DCCOUT 0x1e
104#define MB86A16_DCCOUT_DISEN (0x01 << 0)
105
106#define MB86A16_TONEOUT1 0x1f
107#define MB86A16_TONE_TDIVL (0xff << 0)
108
109#define MB86A16_TONEOUT2 0x20
110#define MB86A16_TONE_TMD (0x03 << 2)
111#define MB86A16_TONE_TDIVH (0x03 << 0)
112
113#define MB86A16_FREQ1 0x21
114#define MB86A16_FREQ2 0x22
115#define MB86A16_FREQ3 0x23
116#define MB86A16_FREQ4 0x24
117#define MB86A16_FREQSET 0x25
118#define MB86A16_CNM 0x26
119#define MB86A16_PORT0 0x27
120#define MB86A16_PORT1 0x28
121#define MB86A16_DRCFILT 0x29
122#define MB86A16_AFC 0x2a
123#define MB86A16_AFCEXL 0x2b
124#define MB86A16_AFCEXH 0x2c
125#define MB86A16_DAGC 0x2d
126#define MB86A16_SEQMODE 0x32
127#define MB86A16_S0S1T 0x33
128#define MB86A16_S2S3T 0x34
129#define MB86A16_S4S5T 0x35
130#define MB86A16_CNTMR 0x36
131#define MB86A16_SIG1 0x37
132#define MB86A16_SIG2 0x38
133#define MB86A16_VIMAG 0x39
134#define MB86A16_VISET1 0x3a
135#define MB86A16_VISET2 0x3b
136#define MB86A16_VISET3 0x3c
137#define MB86A16_FAGCS1 0x3d
138#define MB86A16_FAGCS2 0x3e
139#define MB86A16_FAGCS3 0x3f
140#define MB86A16_FAGCS4 0x40
141#define MB86A16_FAGCS5 0x41
142#define MB86A16_FAGCS6 0x42
143#define MB86A16_CRM 0x43
144#define MB86A16_STRM 0x44
145#define MB86A16_DAGCML 0x45
146#define MB86A16_DAGCMH 0x46
147#define MB86A16_QPSKTST 0x49
148#define MB86A16_DISTMON 0x52
149#define MB86A16_VERSION 0x7f
150
151#endif /* __MB86A16_PRIV_H */
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 6c1dbf9288d8..6ca533ea0f0e 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -426,6 +426,10 @@ struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
426	id = tda10021_readreg(state, 0x1a);
427	if ((id & 0xf0) != 0x70) goto error;
428
429	/* Don't claim TDA10023 */
430	if (id == 0x7d)
431		goto error;
432
433	printk("TDA10021: i2c-addr = 0x%02x, id = 0x%02x\n",
434		state->config->demod_address, id);
435
diff --git a/drivers/media/dvb/frontends/tda665x.c b/drivers/media/dvb/frontends/tda665x.c
new file mode 100644
index 000000000000..87d52739c828
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda665x.c
@@ -0,0 +1,257 @@
1/*
2 TDA665x tuner driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23
24#include "dvb_frontend.h"
25#include "tda665x.h"
26
27struct tda665x_state {
28 struct dvb_frontend *fe;
29 struct i2c_adapter *i2c;
30 const struct tda665x_config *config;
31
32 u32 frequency;
33 u32 bandwidth;
34};
35
36static int tda665x_read(struct tda665x_state *state, u8 *buf)
37{
38 const struct tda665x_config *config = state->config;
39 int err = 0;
40 struct i2c_msg msg = { .addr = config->addr, .flags = I2C_M_RD, .buf = buf, .len = 2 };
41
42 err = i2c_transfer(state->i2c, &msg, 1);
43 if (err != 1)
44 goto exit;
45
46 return err;
47exit:
48 printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
49 return err;
50}
51
52static int tda665x_write(struct tda665x_state *state, u8 *buf, u8 length)
53{
54 const struct tda665x_config *config = state->config;
55 int err = 0;
56 struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = length };
57
58 err = i2c_transfer(state->i2c, &msg, 1);
59 if (err != 1)
60 goto exit;
61
62 return err;
63exit:
64 printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
65 return err;
66}
67
68static int tda665x_get_state(struct dvb_frontend *fe,
69 enum tuner_param param,
70 struct tuner_state *tstate)
71{
72 struct tda665x_state *state = fe->tuner_priv;
73 int err = 0;
74
75 switch (param) {
76 case DVBFE_TUNER_FREQUENCY:
77 tstate->frequency = state->frequency;
78 break;
79 case DVBFE_TUNER_BANDWIDTH:
80 break;
81 default:
82 printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
83 err = -EINVAL;
84 break;
85 }
86
87 return err;
88}
89
90static int tda665x_get_status(struct dvb_frontend *fe, u32 *status)
91{
92 struct tda665x_state *state = fe->tuner_priv;
93 u8 result = 0;
94 int err = 0;
95
96 *status = 0;
97
98 err = tda665x_read(state, &result);
99 if (err < 0)
100 goto exit;
101
102 if ((result >> 6) & 0x01) {
103 printk(KERN_DEBUG "%s: Tuner Phase Locked\n", __func__);
104 *status = 1;
105 }
106
107 return err;
108exit:
109 printk(KERN_ERR "%s: I/O Error\n", __func__);
110 return err;
111}
112
113static int tda665x_set_state(struct dvb_frontend *fe,
114 enum tuner_param param,
115 struct tuner_state *tstate)
116{
117 struct tda665x_state *state = fe->tuner_priv;
118 const struct tda665x_config *config = state->config;
119 u32 frequency, status = 0;
120 u8 buf[4];
121 int err = 0;
122
123 if (param & DVBFE_TUNER_FREQUENCY) {
124
125 frequency = tstate->frequency;
126		if ((frequency < config->frequency_min) || (frequency > config->frequency_max)) {
127 printk(KERN_ERR "%s: Frequency beyond limits, frequency=%d\n", __func__, frequency);
128 return -EINVAL;
129 }
130
131 frequency += config->frequency_offst;
132 frequency *= config->ref_multiplier;
133 frequency += config->ref_divider >> 1;
134 frequency /= config->ref_divider;
135
136		buf[0] = (u8) ((frequency & 0x7f00) >> 8);
137 buf[1] = (u8) (frequency & 0x00ff) >> 0;
138 buf[2] = 0x80 | 0x40 | 0x02;
139 buf[3] = 0x00;
140
141 /* restore frequency */
142 frequency = tstate->frequency;
143
144 if (frequency < 153000000) {
145 /* VHF-L */
146 buf[3] |= 0x01; /* fc, Low Band, 47 - 153 MHz */
147 if (frequency < 68000000)
148 buf[3] |= 0x40; /* 83uA */
149			if (frequency < 104000000)
150				buf[3] |= 0x60; /* 122uA */
151			if (frequency < 125000000)
152 buf[3] |= 0x80; /* 163uA */
153 else
154 buf[3] |= 0xa0; /* 254uA */
155 } else if (frequency < 438000000) {
156 /* VHF-H */
157 buf[3] |= 0x02; /* fc, Mid Band, 153 - 438 MHz */
158 if (frequency < 230000000)
159 buf[3] |= 0x40;
160 if (frequency < 300000000)
161 buf[3] |= 0x60;
162 else
163 buf[3] |= 0x80;
164 } else {
165 /* UHF */
166 buf[3] |= 0x04; /* fc, High Band, 438 - 862 MHz */
167 if (frequency < 470000000)
168 buf[3] |= 0x60;
169 if (frequency < 526000000)
170 buf[3] |= 0x80;
171 else
172 buf[3] |= 0xa0;
173 }
174
175 /* Set params */
176		err = tda665x_write(state, buf, sizeof(buf)); /* 4-byte divider/control word */
177 if (err < 0)
178 goto exit;
179
180 /* sleep for some time */
181 printk(KERN_DEBUG "%s: Waiting to Phase LOCK\n", __func__);
182 msleep(20);
183 /* check status */
184 err = tda665x_get_status(fe, &status);
185 if (err < 0)
186 goto exit;
187
188 if (status == 1) {
189 printk(KERN_DEBUG "%s: Tuner Phase locked: status=%d\n", __func__, status);
190 state->frequency = frequency; /* cache successful state */
191 } else {
192 printk(KERN_ERR "%s: No Phase lock: status=%d\n", __func__, status);
193 }
194 } else {
195 printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
196 return -EINVAL;
197 }
198
199 return 0;
200exit:
201 printk(KERN_ERR "%s: I/O Error\n", __func__);
202 return err;
203}
204
205static int tda665x_release(struct dvb_frontend *fe)
206{
207 struct tda665x_state *state = fe->tuner_priv;
208
209 fe->tuner_priv = NULL;
210 kfree(state);
211 return 0;
212}
213
214static struct dvb_tuner_ops tda665x_ops = {
215
216 .set_state = tda665x_set_state,
217 .get_state = tda665x_get_state,
218 .get_status = tda665x_get_status,
219 .release = tda665x_release
220};
221
222struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
223 const struct tda665x_config *config,
224 struct i2c_adapter *i2c)
225{
226 struct tda665x_state *state = NULL;
227 struct dvb_tuner_info *info;
228
229 state = kzalloc(sizeof(struct tda665x_state), GFP_KERNEL);
230 if (state == NULL)
231 goto exit;
232
233 state->config = config;
234 state->i2c = i2c;
235 state->fe = fe;
236 fe->tuner_priv = state;
237 fe->ops.tuner_ops = tda665x_ops;
238 info = &fe->ops.tuner_ops.info;
239
240 memcpy(info->name, config->name, sizeof(config->name));
241 info->frequency_min = config->frequency_min;
242 info->frequency_max = config->frequency_max;
243 info->frequency_step = config->frequency_offst;
244
245 printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name);
246
247 return fe;
248
249exit:
250 kfree(state);
251 return NULL;
252}
253EXPORT_SYMBOL(tda665x_attach);
254
255MODULE_DESCRIPTION("TDA665x driver");
256MODULE_AUTHOR("Manu Abraham");
257MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda665x.h b/drivers/media/dvb/frontends/tda665x.h
new file mode 100644
index 000000000000..ec7927aa75ae
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda665x.h
@@ -0,0 +1,52 @@
1/*
2 TDA665x tuner driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19
20#ifndef __TDA665x_H
21#define __TDA665x_H
22
23struct tda665x_config {
24 char name[128];
25
26 u8 addr;
27 u32 frequency_min;
28 u32 frequency_max;
29 u32 frequency_offst;
30 u32 ref_multiplier;
31 u32 ref_divider;
32};
33
34#if defined(CONFIG_DVB_TDA665x) || (defined(CONFIG_DVB_TDA665x_MODULE) && defined(MODULE))
35
36extern struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
37 const struct tda665x_config *config,
38 struct i2c_adapter *i2c);
39
40#else
41
42static inline struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
43 const struct tda665x_config *config,
44 struct i2c_adapter *i2c)
45{
46 printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
47 return NULL;
48}
49
50#endif /* CONFIG_DVB_TDA665x */
51
52#endif /* __TDA665x_H */
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
new file mode 100644
index 000000000000..f7b72a32adf3
--- /dev/null
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -0,0 +1,32 @@
1config MANTIS_CORE
2 tristate "Mantis/Hopper PCI bridge based devices"
3 depends on PCI && I2C && INPUT
4
5 help
6 Support for PCI cards based on the Mantis and Hopper PCI bridges.
7
8 Say Y if you own such a device and want to use it.
9
10config DVB_MANTIS
11 tristate "MANTIS based cards"
12 depends on MANTIS_CORE && DVB_CORE && PCI && I2C
13 select DVB_MB86A16
14 select DVB_ZL10353
15 select DVB_STV0299
16 select DVB_PLL
17 help
18 Support for PCI cards based on the Mantis PCI bridge.
19 Say Y when you have a Mantis based DVB card and want to use it.
20
21 If unsure say N.
22
23config DVB_HOPPER
24 tristate "HOPPER based cards"
25 depends on MANTIS_CORE && DVB_CORE && PCI && I2C
26 select DVB_ZL10353
27 select DVB_PLL
28 help
29 Support for PCI cards based on the Hopper PCI bridge.
30 Say Y when you have a Hopper based DVB card and want to use it.
31
32 If unsure say N.
diff --git a/drivers/media/dvb/mantis/Makefile b/drivers/media/dvb/mantis/Makefile
new file mode 100644
index 000000000000..98dc5cd258ac
--- /dev/null
+++ b/drivers/media/dvb/mantis/Makefile
@@ -0,0 +1,28 @@
1mantis_core-objs := mantis_ioc.o \
2 mantis_uart.o \
3 mantis_dma.o \
4 mantis_pci.o \
5 mantis_i2c.o \
6 mantis_dvb.o \
7 mantis_evm.o \
8 mantis_hif.o \
9 mantis_ca.o \
10 mantis_pcmcia.o \
11 mantis_input.o
12
13mantis-objs := mantis_cards.o \
14 mantis_vp1033.o \
15 mantis_vp1034.o \
16 mantis_vp1041.o \
17 mantis_vp2033.o \
18 mantis_vp2040.o \
19 mantis_vp3030.o
20
21hopper-objs := hopper_cards.o \
22 hopper_vp3028.o
23
24obj-$(CONFIG_MANTIS_CORE) += mantis_core.o
25obj-$(CONFIG_DVB_MANTIS) += mantis.o
26obj-$(CONFIG_DVB_HOPPER) += hopper.o
27
28EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/mantis/hopper_cards.c b/drivers/media/dvb/mantis/hopper_cards.c
new file mode 100644
index 000000000000..d073c61e3c0d
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_cards.c
@@ -0,0 +1,275 @@
1/*
2 Hopper PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/kernel.h>
24#include <linux/pci.h>
25#include <asm/irq.h>
26#include <linux/interrupt.h>
27
28#include "dmxdev.h"
29#include "dvbdev.h"
30#include "dvb_demux.h"
31#include "dvb_frontend.h"
32#include "dvb_net.h"
33
34#include "mantis_common.h"
35#include "hopper_vp3028.h"
36#include "mantis_dma.h"
37#include "mantis_dvb.h"
38#include "mantis_uart.h"
39#include "mantis_ioc.h"
40#include "mantis_pci.h"
41#include "mantis_i2c.h"
42#include "mantis_reg.h"
43
44static unsigned int verbose;
45module_param(verbose, uint, 0644);
46MODULE_PARM_DESC(verbose, "verbose startup messages, default is 0 (no)");
47
48#define DRIVER_NAME "Hopper"
49
50static char *label[10] = {
51 "DMA",
52 "IRQ-0",
53 "IRQ-1",
54 "OCERR",
55 "PABRT",
56 "RIPRR",
57 "PPERR",
58 "FTRGT",
59 "RISCI",
60 "RACK"
61};
62
63static int devs;
64
65static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
66{
67 u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
68 u32 rst_stat = 0, rst_mask = 0;
69
70 struct mantis_pci *mantis;
71 struct mantis_ca *ca;
72
73 mantis = (struct mantis_pci *) dev_id;
74 if (unlikely(mantis == NULL)) {
75 dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
76 return IRQ_NONE;
77 }
78 ca = mantis->mantis_ca;
79
80 stat = mmread(MANTIS_INT_STAT);
81 mask = mmread(MANTIS_INT_MASK);
82 mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
83 if (!(stat & mask))
84 return IRQ_NONE;
85
86 rst_mask = MANTIS_GPIF_WRACK |
87 MANTIS_GPIF_OTHERR |
88 MANTIS_SBUF_WSTO |
89 MANTIS_GPIF_EXTIRQ;
90
91 rst_stat = mmread(MANTIS_GPIF_STATUS);
92 rst_stat &= rst_mask;
93 mmwrite(rst_stat, MANTIS_GPIF_STATUS);
94
95 mantis->mantis_int_stat = stat;
96 mantis->mantis_int_mask = mask;
97 dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
98 if (stat & MANTIS_INT_RISCEN) {
99 dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
100 }
101 if (stat & MANTIS_INT_IRQ0) {
102 dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
103 mantis->gpif_status = rst_stat;
104 wake_up(&ca->hif_write_wq);
105 schedule_work(&ca->hif_evm_work);
106 }
107 if (stat & MANTIS_INT_IRQ1) {
108 dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
109 schedule_work(&mantis->uart_work);
110 }
111 if (stat & MANTIS_INT_OCERR) {
112 dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
113 }
114 if (stat & MANTIS_INT_PABORT) {
115 dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
116 }
117 if (stat & MANTIS_INT_RIPERR) {
118 dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
119 }
120 if (stat & MANTIS_INT_PPERR) {
121 dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
122 }
123 if (stat & MANTIS_INT_FTRGT) {
124 dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
125 }
126 if (stat & MANTIS_INT_RISCI) {
127 dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
128 mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
129 tasklet_schedule(&mantis->tasklet);
130 }
131 if (stat & MANTIS_INT_I2CDONE) {
132 dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
133 wake_up(&mantis->i2c_wq);
134 }
135 mmwrite(stat, MANTIS_INT_STAT);
136 stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE |
137 MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 |
138 MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 |
139 MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 |
140 MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 |
141 MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 |
142 MANTIS_INT_IRQ0 | MANTIS_INT_OCERR |
143 MANTIS_INT_PABORT | MANTIS_INT_RIPERR |
144 MANTIS_INT_PPERR | MANTIS_INT_FTRGT |
145 MANTIS_INT_RISCI);
146
147 if (stat)
148 dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);
149
150 dprintk(MANTIS_DEBUG, 0, "\n");
151 return IRQ_HANDLED;
152}
153
154static int __devinit hopper_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
155{
156 struct mantis_pci *mantis;
157 struct mantis_hwconfig *config;
158 int err = 0;
159
160 mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
161 if (mantis == NULL) {
162 printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
163 err = -ENOMEM;
164 goto fail0;
165 }
166
167 mantis->num = devs;
168 mantis->verbose = verbose;
169 mantis->pdev = pdev;
170 config = (struct mantis_hwconfig *) pci_id->driver_data;
171 config->irq_handler = &hopper_irq_handler;
172 mantis->hwconfig = config;
173
174 err = mantis_pci_init(mantis);
175 if (err) {
176 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
177 goto fail1;
178 }
179
180 err = mantis_stream_control(mantis, STREAM_TO_HIF);
181 if (err < 0) {
182 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
183 goto fail1;
184 }
185
186 err = mantis_i2c_init(mantis);
187 if (err < 0) {
188 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
189 goto fail2;
190 }
191
192 err = mantis_get_mac(mantis);
193 if (err < 0) {
194 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
195 goto fail2;
196 }
197
198 err = mantis_dma_init(mantis);
199 if (err < 0) {
200 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
201 goto fail3;
202 }
203
204 err = mantis_dvb_init(mantis);
205 if (err < 0) {
206 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
207 goto fail4;
208 }
209 devs++;
210
211 return err;
212
213fail4:
214 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
215 mantis_dma_exit(mantis);
216
217fail3:
218 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
219 mantis_i2c_exit(mantis);
220
221fail2:
222 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
223 mantis_pci_exit(mantis);
224
225fail1:
226 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
227 kfree(mantis);
228
229fail0:
230 return err;
231}
232
233static void __devexit hopper_pci_remove(struct pci_dev *pdev)
234{
235 struct mantis_pci *mantis = pci_get_drvdata(pdev);
236
237 if (mantis) {
238 mantis_dvb_exit(mantis);
239 mantis_dma_exit(mantis);
240 mantis_i2c_exit(mantis);
241 mantis_pci_exit(mantis);
242 kfree(mantis);
243 }
244 return;
245
246}
247
248static struct pci_device_id hopper_pci_table[] = {
249 MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config),
250 { }
251};
252
253static struct pci_driver hopper_pci_driver = {
254 .name = DRIVER_NAME,
255 .id_table = hopper_pci_table,
256 .probe = hopper_pci_probe,
257 .remove = hopper_pci_remove,
258};
259
260static int __init hopper_init(void)
261{
262 return pci_register_driver(&hopper_pci_driver);
263}
264
265static void __exit hopper_exit(void)
266{
267 pci_unregister_driver(&hopper_pci_driver);
268}
269
270module_init(hopper_init);
271module_exit(hopper_exit);
272
273MODULE_DESCRIPTION("HOPPER driver");
274MODULE_AUTHOR("Manu Abraham");
275MODULE_LICENSE("GPL");
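The probe function above (and its Mantis twin later in this series) relies on the usual goto-ladder convention: each initialisation step that fails jumps to a label that tears down only the steps that already succeeded, in reverse order, before the device structure is freed. A compact sketch of that convention, with hypothetical step names standing in for the PCI/I2C/DMA/DVB helpers, looks like this:

/* Sketch of the unwind ladder used by hopper_pci_probe()/mantis_pci_probe().
 * The step_* helpers and example_dev are hypothetical stand-ins. */
struct example_dev;

static int  step_one_init(struct example_dev *dev);	/* e.g. mantis_pci_init() */
static void step_one_exit(struct example_dev *dev);
static int  step_two_init(struct example_dev *dev);	/* e.g. mantis_i2c_init() */
static void step_two_exit(struct example_dev *dev);
static int  step_three_init(struct example_dev *dev);	/* e.g. mantis_dma_init() */

static int example_probe(struct example_dev *dev)
{
	int err;

	err = step_one_init(dev);
	if (err < 0)
		goto err_one;
	err = step_two_init(dev);
	if (err < 0)
		goto err_two;
	err = step_three_init(dev);
	if (err < 0)
		goto err_three;

	return 0;

err_three:
	step_two_exit(dev);	/* step three failed: undo step two first ... */
err_two:
	step_one_exit(dev);	/* ... then step one, in reverse order */
err_one:
	return err;
}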
diff --git a/drivers/media/dvb/mantis/hopper_vp3028.c b/drivers/media/dvb/mantis/hopper_vp3028.c
new file mode 100644
index 000000000000..96674c78e86b
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_vp3028.c
@@ -0,0 +1,88 @@
1/*
2 Hopper VP-3028 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "zl10353.h"
32#include "mantis_common.h"
33#include "mantis_ioc.h"
34#include "mantis_dvb.h"
35#include "hopper_vp3028.h"
36
37struct zl10353_config hopper_vp3028_config = {
38 .demod_address = 0x0f,
39};
40
41#define MANTIS_MODEL_NAME "VP-3028"
42#define MANTIS_DEV_TYPE "DVB-T"
43
44static int vp3028_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
45{
46 struct i2c_adapter *adapter = &mantis->adapter;
47 struct mantis_hwconfig *config = mantis->hwconfig;
48 int err = 0;
49
50 gpio_set_bits(mantis, config->reset, 0);
51 msleep(100);
52 err = mantis_frontend_power(mantis, POWER_ON);
53 msleep(100);
54 gpio_set_bits(mantis, config->reset, 1);
55
56 err = mantis_frontend_power(mantis, POWER_ON);
57 if (err == 0) {
58 msleep(250);
59 dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
60 mantis->fe = zl10353_attach(&hopper_vp3028_config, adapter);
61
62 if (!mantis->fe)
63 return -1;
64 } else {
65 dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
66 adapter->name,
67 err);
68
69 return -EIO;
70 }
71 dprintk(MANTIS_ERROR, 1, "Done!");
72
73 return 0;
74}
75
76struct mantis_hwconfig vp3028_config = {
77 .model_name = MANTIS_MODEL_NAME,
78 .dev_type = MANTIS_DEV_TYPE,
79 .ts_size = MANTIS_TS_188,
80
81 .baud_rate = MANTIS_BAUD_9600,
82 .parity = MANTIS_PARITY_NONE,
83 .bytes = 0,
84
85 .frontend_init = vp3028_frontend_init,
86 .power = GPIF_A00,
87 .reset = GPIF_A03,
88};
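The contract between a board file like this and mantis_dvb_init() (later in this series) is that frontend_init() must leave the attached frontend in mantis->fe and return 0; the bridge code then registers whatever it finds there and unwinds if it is NULL or the call fails. A minimal sketch of that contract, with hypothetical attach names:

/* Hedged sketch of a frontend_init callback; example_demod_attach() and
 * example_demod_config are hypothetical stand-ins for a real demodulator
 * attach function such as zl10353_attach() and its config. */
static int example_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
	mantis->fe = example_demod_attach(&example_demod_config, &mantis->adapter);
	if (!mantis->fe)
		return -ENODEV;	/* mantis_dvb_init() will unwind */

	return 0;		/* mantis->fe gets registered by the caller */
}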
diff --git a/drivers/media/dvb/mantis/hopper_vp3028.h b/drivers/media/dvb/mantis/hopper_vp3028.h
new file mode 100644
index 000000000000..57239498bc87
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_vp3028.h
@@ -0,0 +1,30 @@
1/*
2 Hopper VP-3028 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP3028_H
22#define __MANTIS_VP3028_H
23
24#include "mantis_common.h"
25
26#define MANTIS_VP_3028_DVB_T 0x0028
27
28extern struct mantis_hwconfig vp3028_config;
29
30#endif /* __MANTIS_VP3028_H */
diff --git a/drivers/media/dvb/mantis/mantis_ca.c b/drivers/media/dvb/mantis/mantis_ca.c
new file mode 100644
index 000000000000..403ce043d00e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ca.c
@@ -0,0 +1,207 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "mantis_common.h"
32#include "mantis_link.h"
33#include "mantis_hif.h"
34#include "mantis_reg.h"
35
36#include "mantis_ca.h"
37
38static int mantis_ca_read_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr)
39{
40 struct mantis_ca *ca = en50221->data;
41 struct mantis_pci *mantis = ca->ca_priv;
42
43 dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Read", slot);
44
45 if (slot != 0)
46 return -EINVAL;
47
48 return mantis_hif_read_mem(ca, addr);
49}
50
51static int mantis_ca_write_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data)
52{
53 struct mantis_ca *ca = en50221->data;
54 struct mantis_pci *mantis = ca->ca_priv;
55
56 dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Write", slot);
57
58 if (slot != 0)
59 return -EINVAL;
60
61 return mantis_hif_write_mem(ca, addr, data);
62}
63
64static int mantis_ca_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
65{
66 struct mantis_ca *ca = en50221->data;
67 struct mantis_pci *mantis = ca->ca_priv;
68
69 dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Read", slot);
70
71 if (slot != 0)
72 return -EINVAL;
73
74 return mantis_hif_read_iom(ca, addr);
75}
76
77static int mantis_ca_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data)
78{
79 struct mantis_ca *ca = en50221->data;
80 struct mantis_pci *mantis = ca->ca_priv;
81
82 dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Write", slot);
83
84 if (slot != 0)
85 return -EINVAL;
86
87 return mantis_hif_write_iom(ca, addr, data);
88}
89
90static int mantis_ca_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
91{
92 struct mantis_ca *ca = en50221->data;
93 struct mantis_pci *mantis = ca->ca_priv;
94
95 dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot RESET", slot);
96 udelay(500); /* Wait.. */
97 mmwrite(0xda, MANTIS_PCMCIA_RESET); /* Leading edge assert */
98 udelay(500);
99 mmwrite(0x00, MANTIS_PCMCIA_RESET); /* Trailing edge deassert */
100 msleep(1000);
101 dvb_ca_en50221_camready_irq(&ca->en50221, 0);
102
103 return 0;
104}
105
106static int mantis_ca_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
107{
108 struct mantis_ca *ca = en50221->data;
109 struct mantis_pci *mantis = ca->ca_priv;
110
111 dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot shutdown", slot);
112
113 return 0;
114}
115
116static int mantis_ts_control(struct dvb_ca_en50221 *en50221, int slot)
117{
118 struct mantis_ca *ca = en50221->data;
119 struct mantis_pci *mantis = ca->ca_priv;
120
121 dprintk(MANTIS_DEBUG, 1, "Slot(%d): TS control", slot);
122/* mantis_set_direction(mantis, 1); */ /* Enable TS through CAM */
123
124 return 0;
125}
126
127static int mantis_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
128{
129 struct mantis_ca *ca = en50221->data;
130 struct mantis_pci *mantis = ca->ca_priv;
131
132 dprintk(MANTIS_DEBUG, 1, "Slot(%d): Poll Slot status", slot);
133
134 if (ca->slot_state == MODULE_INSERTED) {
135 dprintk(MANTIS_DEBUG, 1, "CA Module present and ready");
136 return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
137 } else {
138 dprintk(MANTIS_DEBUG, 1, "CA Module not present or not ready");
139 }
140
141 return 0;
142}
143
144int mantis_ca_init(struct mantis_pci *mantis)
145{
146 struct dvb_adapter *dvb_adapter = &mantis->dvb_adapter;
147 struct mantis_ca *ca;
148 int ca_flags = 0, result;
149
150 dprintk(MANTIS_DEBUG, 1, "Initializing Mantis CA");
151 ca = kzalloc(sizeof(struct mantis_ca), GFP_KERNEL);
152 if (!ca) {
153 dprintk(MANTIS_ERROR, 1, "Out of memory, exiting ..");
154 result = -ENOMEM;
155 goto err;
156 }
157
158 ca->ca_priv = mantis;
159 mantis->mantis_ca = ca;
160 ca_flags = DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE;
161 /* register CA interface */
162 ca->en50221.owner = THIS_MODULE;
163 ca->en50221.read_attribute_mem = mantis_ca_read_attr_mem;
164 ca->en50221.write_attribute_mem = mantis_ca_write_attr_mem;
165 ca->en50221.read_cam_control = mantis_ca_read_cam_ctl;
166 ca->en50221.write_cam_control = mantis_ca_write_cam_ctl;
167 ca->en50221.slot_reset = mantis_ca_slot_reset;
168 ca->en50221.slot_shutdown = mantis_ca_slot_shutdown;
169 ca->en50221.slot_ts_enable = mantis_ts_control;
170 ca->en50221.poll_slot_status = mantis_slot_status;
171 ca->en50221.data = ca;
172
173 mutex_init(&ca->ca_lock);
174
175 init_waitqueue_head(&ca->hif_data_wq);
176 init_waitqueue_head(&ca->hif_opdone_wq);
177 init_waitqueue_head(&ca->hif_write_wq);
178
179 dprintk(MANTIS_ERROR, 1, "Registering EN50221 device");
180 result = dvb_ca_en50221_init(dvb_adapter, &ca->en50221, ca_flags, 1);
181 if (result != 0) {
182 dprintk(MANTIS_ERROR, 1, "EN50221: Initialization failed <%d>", result);
183 goto err;
184 }
185 dprintk(MANTIS_ERROR, 1, "Registered EN50221 device");
186 mantis_evmgr_init(ca);
187 return 0;
188err:
189 kfree(ca);
190 return result;
191}
192EXPORT_SYMBOL_GPL(mantis_ca_init);
193
194void mantis_ca_exit(struct mantis_pci *mantis)
195{
196 struct mantis_ca *ca = mantis->mantis_ca;
197
198 dprintk(MANTIS_DEBUG, 1, "Mantis CA exit");
199
200 mantis_evmgr_exit(ca);
201 dprintk(MANTIS_ERROR, 1, "Unregistering EN50221 device");
202 if (ca)
203 dvb_ca_en50221_release(&ca->en50221);
204
205 kfree(ca);
206}
207EXPORT_SYMBOL_GPL(mantis_ca_exit);
diff --git a/drivers/media/dvb/mantis/mantis_ca.h b/drivers/media/dvb/mantis/mantis_ca.h
new file mode 100644
index 000000000000..dc63e55f7eca
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ca.h
@@ -0,0 +1,27 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_CA_H
22#define __MANTIS_CA_H
23
24extern int mantis_ca_init(struct mantis_pci *mantis);
25extern void mantis_ca_exit(struct mantis_pci *mantis);
26
27#endif /* __MANTIS_CA_H */
diff --git a/drivers/media/dvb/mantis/mantis_cards.c b/drivers/media/dvb/mantis/mantis_cards.c
new file mode 100644
index 000000000000..16f1708fd3bc
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_cards.c
@@ -0,0 +1,305 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/kernel.h>
24#include <linux/pci.h>
25#include <asm/irq.h>
26#include <linux/interrupt.h>
27
28#include "dmxdev.h"
29#include "dvbdev.h"
30#include "dvb_demux.h"
31#include "dvb_frontend.h"
32#include "dvb_net.h"
33
34#include "mantis_common.h"
35
36#include "mantis_vp1033.h"
37#include "mantis_vp1034.h"
38#include "mantis_vp1041.h"
39#include "mantis_vp2033.h"
40#include "mantis_vp2040.h"
41#include "mantis_vp3030.h"
42
43#include "mantis_dma.h"
44#include "mantis_ca.h"
45#include "mantis_dvb.h"
46#include "mantis_uart.h"
47#include "mantis_ioc.h"
48#include "mantis_pci.h"
49#include "mantis_i2c.h"
50#include "mantis_reg.h"
51
52static unsigned int verbose;
53module_param(verbose, uint, 0644);
54MODULE_PARM_DESC(verbose, "verbose startup messages, default is 0 (no)");
55
56static int devs;
57
58#define DRIVER_NAME "Mantis"
59
60static char *label[10] = {
61 "DMA",
62 "IRQ-0",
63 "IRQ-1",
64 "OCERR",
65 "PABRT",
66 "RIPRR",
67 "PPERR",
68 "FTRGT",
69 "RISCI",
70 "RACK"
71};
72
73static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
74{
75 u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
76 u32 rst_stat = 0, rst_mask = 0;
77
78 struct mantis_pci *mantis;
79 struct mantis_ca *ca;
80
81 mantis = (struct mantis_pci *) dev_id;
82 if (unlikely(mantis == NULL)) {
83 dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
84 return IRQ_NONE;
85 }
86 ca = mantis->mantis_ca;
87
88 stat = mmread(MANTIS_INT_STAT);
89 mask = mmread(MANTIS_INT_MASK);
90 mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
91 if (!(stat & mask))
92 return IRQ_NONE;
93
94 rst_mask = MANTIS_GPIF_WRACK |
95 MANTIS_GPIF_OTHERR |
96 MANTIS_SBUF_WSTO |
97 MANTIS_GPIF_EXTIRQ;
98
99 rst_stat = mmread(MANTIS_GPIF_STATUS);
100 rst_stat &= rst_mask;
101 mmwrite(rst_stat, MANTIS_GPIF_STATUS);
102
103 mantis->mantis_int_stat = stat;
104 mantis->mantis_int_mask = mask;
105 dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
106 if (stat & MANTIS_INT_RISCEN) {
107 dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
108 }
109 if (stat & MANTIS_INT_IRQ0) {
110 dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
111 mantis->gpif_status = rst_stat;
112 wake_up(&ca->hif_write_wq);
113 schedule_work(&ca->hif_evm_work);
114 }
115 if (stat & MANTIS_INT_IRQ1) {
116 dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
117 schedule_work(&mantis->uart_work);
118 }
119 if (stat & MANTIS_INT_OCERR) {
120 dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
121 }
122 if (stat & MANTIS_INT_PABORT) {
123 dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
124 }
125 if (stat & MANTIS_INT_RIPERR) {
126 dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
127 }
128 if (stat & MANTIS_INT_PPERR) {
129 dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
130 }
131 if (stat & MANTIS_INT_FTRGT) {
132 dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
133 }
134 if (stat & MANTIS_INT_RISCI) {
135 dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
136 mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
137 tasklet_schedule(&mantis->tasklet);
138 }
139 if (stat & MANTIS_INT_I2CDONE) {
140 dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
141 wake_up(&mantis->i2c_wq);
142 }
143 mmwrite(stat, MANTIS_INT_STAT);
144 stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE |
145 MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 |
146 MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 |
147 MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 |
148 MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 |
149 MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 |
150 MANTIS_INT_IRQ0 | MANTIS_INT_OCERR |
151 MANTIS_INT_PABORT | MANTIS_INT_RIPERR |
152 MANTIS_INT_PPERR | MANTIS_INT_FTRGT |
153 MANTIS_INT_RISCI);
154
155 if (stat)
156 dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);
157
158 dprintk(MANTIS_DEBUG, 0, "\n");
159 return IRQ_HANDLED;
160}
161
162static int __devinit mantis_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
163{
164 struct mantis_pci *mantis;
165 struct mantis_hwconfig *config;
166 int err = 0;
167
168 mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
169 if (mantis == NULL) {
170 printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
171 err = -ENOMEM;
172 goto fail0;
173 }
174
175 mantis->num = devs;
176 mantis->verbose = verbose;
177 mantis->pdev = pdev;
178 config = (struct mantis_hwconfig *) pci_id->driver_data;
179 config->irq_handler = &mantis_irq_handler;
180 mantis->hwconfig = config;
181
182 err = mantis_pci_init(mantis);
183 if (err) {
184 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
185 goto fail1;
186 }
187
188 err = mantis_stream_control(mantis, STREAM_TO_HIF);
189 if (err < 0) {
190 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
191 goto fail1;
192 }
193
194 err = mantis_i2c_init(mantis);
195 if (err < 0) {
196 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
197 goto fail2;
198 }
199
200 err = mantis_get_mac(mantis);
201 if (err < 0) {
202 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
203 goto fail2;
204 }
205
206 err = mantis_dma_init(mantis);
207 if (err < 0) {
208 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
209 goto fail3;
210 }
211
212 err = mantis_dvb_init(mantis);
213 if (err < 0) {
214 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
215 goto fail4;
216 }
217 err = mantis_uart_init(mantis);
218 if (err < 0) {
219 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis UART initialization failed <%d>", err);
220 goto fail6;
221 }
222
223 devs++;
224
225 return err;
226
227
228fail6:
229 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB exit! <%d>", err);
230 mantis_dvb_exit(mantis);
231
232fail4:
233 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
234 mantis_dma_exit(mantis);
235
236fail3:
237 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
238 mantis_i2c_exit(mantis);
239
240fail2:
241 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
242 mantis_pci_exit(mantis);
243
244fail1:
245 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
246 kfree(mantis);
247
248fail0:
249 return err;
250}
251
252static void __devexit mantis_pci_remove(struct pci_dev *pdev)
253{
254 struct mantis_pci *mantis = pci_get_drvdata(pdev);
255
256 if (mantis) {
257
258 mantis_uart_exit(mantis);
259 mantis_dvb_exit(mantis);
260 mantis_dma_exit(mantis);
261 mantis_i2c_exit(mantis);
262 mantis_pci_exit(mantis);
263 kfree(mantis);
264 }
265 return;
266}
267
268static struct pci_device_id mantis_pci_table[] = {
269 MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1033_DVB_S, &vp1033_config),
270 MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1034_DVB_S, &vp1034_config),
271 MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1041_DVB_S2, &vp1041_config),
272 MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_10, &vp1041_config),
273 MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_20, &vp1041_config),
274 MAKE_ENTRY(TERRATEC, CINERGY_S2_PCI_HD, &vp1041_config),
275 MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_2033_DVB_C, &vp2033_config),
276 MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_2040_DVB_C, &vp2040_config),
277 MAKE_ENTRY(TECHNISAT, CABLESTAR_HD2, &vp2040_config),
278 MAKE_ENTRY(TERRATEC, CINERGY_C, &vp2033_config),
279 MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3030_DVB_T, &vp3030_config),
280 { }
281};
282
283static struct pci_driver mantis_pci_driver = {
284 .name = DRIVER_NAME,
285 .id_table = mantis_pci_table,
286 .probe = mantis_pci_probe,
287 .remove = mantis_pci_remove,
288};
289
290static int __init mantis_init(void)
291{
292 return pci_register_driver(&mantis_pci_driver);
293}
294
295static void __exit mantis_exit(void)
296{
297 pci_unregister_driver(&mantis_pci_driver);
298}
299
300module_init(mantis_init);
301module_exit(mantis_exit);
302
303MODULE_DESCRIPTION("MANTIS driver");
304MODULE_AUTHOR("Manu Abraham");
305MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/mantis_common.h b/drivers/media/dvb/mantis/mantis_common.h
new file mode 100644
index 000000000000..d0b645a483c9
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_common.h
@@ -0,0 +1,179 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_COMMON_H
22#define __MANTIS_COMMON_H
23
24#include <linux/mutex.h>
25#include <linux/workqueue.h>
26
27#include "mantis_uart.h"
28
29#include "mantis_link.h"
30
31#define MANTIS_ERROR 0
32#define MANTIS_NOTICE 1
33#define MANTIS_INFO 2
34#define MANTIS_DEBUG 3
35#define MANTIS_TMG 9
36
37#define dprintk(y, z, format, arg...) do { \
38 if (z) { \
39 if ((mantis->verbose > MANTIS_ERROR) && (mantis->verbose > y)) \
40 printk(KERN_ERR "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
41 else if ((mantis->verbose > MANTIS_NOTICE) && (mantis->verbose > y)) \
42 printk(KERN_NOTICE "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
43 else if ((mantis->verbose > MANTIS_INFO) && (mantis->verbose > y)) \
44 printk(KERN_INFO "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
45 else if ((mantis->verbose > MANTIS_DEBUG) && (mantis->verbose > y)) \
46 printk(KERN_DEBUG "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
47 else if ((mantis->verbose > MANTIS_TMG) && (mantis->verbose > y)) \
48 printk(KERN_DEBUG "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
49 } else { \
50 if (mantis->verbose > y) \
51 printk(format , ##arg); \
52 } \
53} while(0)
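In other words, a call only produces output when the module's verbose parameter is greater than the level passed as y, and the z argument chooses between a full line, prefixed with the function name and device number and terminated with a newline, and a bare continuation printed exactly as given. Two calls taken from elsewhere in this series illustrate the two forms (a mantis pointer is in scope at every call site in the driver):

/* Prefixed form: printed as, e.g., "mantis_dma_init (0): Mantis DMA init\n"
 * once 'verbose' is raised above MANTIS_DEBUG. */
dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");

/* Continuation form: printed verbatim, no prefix and no added newline. */
dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);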
54
55#define mwrite(dat, addr) writel((dat), addr)
56#define mread(addr) readl(addr)
57
58#define mmwrite(dat, addr) mwrite((dat), (mantis->mmio + (addr)))
59#define mmread(addr) mread(mantis->mmio + (addr))
60
61#define MANTIS_TS_188 0
62#define MANTIS_TS_204 1
63
64#define TWINHAN_TECHNOLOGIES 0x1822
65#define MANTIS 0x4e35
66
67#define TECHNISAT 0x1ae4
68#define TERRATEC 0x153b
69
70#define MAKE_ENTRY(__subven, __subdev, __configptr) { \
71 .vendor = TWINHAN_TECHNOLOGIES, \
72 .device = MANTIS, \
73 .subvendor = (__subven), \
74 .subdevice = (__subdev), \
75 .driver_data = (unsigned long) (__configptr) \
76}
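All boards handled by these drivers share one PCI vendor/device pair, the Twinhan bridge itself, and are told apart purely by the subsystem IDs, which is what MAKE_ENTRY captures. As an illustration, the single Hopper table entry shown earlier (hopper_pci_table in hopper_cards.c) expands to the equivalent of:

/* MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config)
 * written out by hand: */
static const struct pci_device_id example_vp3028_entry = {
	.vendor      = TWINHAN_TECHNOLOGIES,	/* 0x1822: bridge vendor, fixed */
	.device      = MANTIS,			/* 0x4e35: bridge device, fixed */
	.subvendor   = TWINHAN_TECHNOLOGIES,	/* board (subsystem) vendor */
	.subdevice   = MANTIS_VP_3028_DVB_T,	/* 0x0028: selects the VP-3028 */
	.driver_data = (unsigned long) &vp3028_config,
};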
77
78enum mantis_i2c_mode {
79 MANTIS_PAGE_MODE = 0,
80 MANTIS_BYTE_MODE,
81};
82
83struct mantis_pci;
84
85struct mantis_hwconfig {
86 char *model_name;
87 char *dev_type;
88 u32 ts_size;
89
90 enum mantis_baud baud_rate;
91 enum mantis_parity parity;
92 u32 bytes;
93
94 irqreturn_t (*irq_handler)(int irq, void *dev_id);
95 int (*frontend_init)(struct mantis_pci *mantis, struct dvb_frontend *fe);
96
97 u8 power;
98 u8 reset;
99
100 enum mantis_i2c_mode i2c_mode;
101};
102
103struct mantis_pci {
104 unsigned int verbose;
105
106 /* PCI stuff */
107 u16 vendor_id;
108 u16 device_id;
109 u16 subsystem_vendor;
110 u16 subsystem_device;
111
112 u8 latency;
113
114 struct pci_dev *pdev;
115
116 unsigned long mantis_addr;
117 void __iomem *mmio;
118
119 u8 irq;
120 u8 revision;
121
122 unsigned int num;
123
124 /* RISC Core */
125 u32 finished_block;
126 u32 last_block;
127 u32 line_bytes;
128 u32 line_count;
129 u32 risc_pos;
130 u8 *buf_cpu;
131 dma_addr_t buf_dma;
132 u32 *risc_cpu;
133 dma_addr_t risc_dma;
134
135 struct tasklet_struct tasklet;
136
137 struct i2c_adapter adapter;
138 int i2c_rc;
139 wait_queue_head_t i2c_wq;
140 struct mutex i2c_lock;
141
142 /* DVB stuff */
143 struct dvb_adapter dvb_adapter;
144 struct dvb_frontend *fe;
145 struct dvb_demux demux;
146 struct dmxdev dmxdev;
147 struct dmx_frontend fe_hw;
148 struct dmx_frontend fe_mem;
149 struct dvb_net dvbnet;
150
151 u8 feeds;
152
153 struct mantis_hwconfig *hwconfig;
154
155 u32 mantis_int_stat;
156 u32 mantis_int_mask;
157
158 /* board specific */
159 u8 mac_address[8];
160 u32 sub_vendor_id;
161 u32 sub_device_id;
162
163 /* A12 A13 A14 */
164 u32 gpio_status;
165
166 u32 gpif_status;
167
168 struct mantis_ca *mantis_ca;
169
170 wait_queue_head_t uart_wq;
171 struct work_struct uart_work;
172 spinlock_t uart_lock;
173
174 struct input_dev *rc;
175};
176
177#define MANTIS_HIF_STATUS (mantis->gpio_status)
178
179#endif /* __MANTIS_COMMON_H */
diff --git a/drivers/media/dvb/mantis/mantis_core.c b/drivers/media/dvb/mantis/mantis_core.c
new file mode 100644
index 000000000000..8113b23ce448
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_core.c
@@ -0,0 +1,238 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include "mantis_common.h"
22#include "mantis_core.h"
23#include "mantis_vp1033.h"
24#include "mantis_vp1034.h"
25#include "mantis_vp1041.h"
26#include "mantis_vp2033.h"
27#include "mantis_vp2040.h"
28#include "mantis_vp3030.h"
29
30static int read_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
31{
32 int err;
33 struct i2c_msg msg[] = {
34 {
35 .addr = 0x50,
36 .flags = 0,
37 .buf = data,
38 .len = 1
39 }, {
40 .addr = 0x50,
41 .flags = I2C_M_RD,
42 .buf = data,
43 .len = length
44 },
45 };
46
47 err = i2c_transfer(&mantis->adapter, msg, 2);
48 if (err < 0) {
49 dprintk(verbose, MANTIS_ERROR, 1,
50 "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
51 err, data[0], data[1]);
52
53 return err;
54 }
55
56 return 0;
57}
58
59static int write_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
60{
61 int err;
62
63 struct i2c_msg msg = {
64 .addr = 0x50,
65 .flags = 0,
66 .buf = data,
67 .len = length
68 };
69
70 err = i2c_transfer(&mantis->adapter, &msg, 1);
71 if (err < 0) {
72 dprintk(verbose, MANTIS_ERROR, 1,
73 "ERROR: i2c write: < err=%i length=0x%02x d0=0x%02x, d1=0x%02x >",
74 err, length, data[0], data[1]);
75
76 return err;
77 }
78
79 return 0;
80}
81
82static int get_mac_address(struct mantis_pci *mantis)
83{
84 int err;
85
86 mantis->mac_address[0] = 0x08;
87 err = read_eeprom_byte(mantis, &mantis->mac_address[0], 6);
88 if (err < 0) {
89 dprintk(verbose, MANTIS_ERROR, 1, "Mantis EEPROM read error");
90
91 return err;
92 }
93 dprintk(verbose, MANTIS_ERROR, 0,
94 " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
95 mantis->mac_address[0], mantis->mac_address[1],
96 mantis->mac_address[2], mantis->mac_address[3],
97 mantis->mac_address[4], mantis->mac_address[5]);
98
99 return 0;
100}
101
102#define MANTIS_MODEL_UNKNOWN "UNKNOWN"
103#define MANTIS_DEV_UNKNOWN "UNKNOWN"
104
105struct mantis_hwconfig unknown_device = {
106 .model_name = MANTIS_MODEL_UNKNOWN,
107 .dev_type = MANTIS_DEV_UNKNOWN,
108};
109
110static void mantis_load_config(struct mantis_pci *mantis)
111{
112 switch (mantis->subsystem_device) {
113 case MANTIS_VP_1033_DVB_S: /* VP-1033 */
114 mantis->hwconfig = &vp1033_mantis_config;
115 break;
116 case MANTIS_VP_1034_DVB_S: /* VP-1034 */
117 mantis->hwconfig = &vp1034_mantis_config;
118 break;
119 case MANTIS_VP_1041_DVB_S2: /* VP-1041 */
120 case TECHNISAT_SKYSTAR_HD2:
121 mantis->hwconfig = &vp1041_mantis_config;
122 break;
123 case MANTIS_VP_2033_DVB_C: /* VP-2033 */
124 mantis->hwconfig = &vp2033_mantis_config;
125 break;
126 case MANTIS_VP_2040_DVB_C: /* VP-2040 */
127 case TERRATEC_CINERGY_C_PCI: /* VP-2040 clone */
128 case TECHNISAT_CABLESTAR_HD2:
129 mantis->hwconfig = &vp2040_mantis_config;
130 break;
131 case MANTIS_VP_3030_DVB_T: /* VP-3030 */
132 mantis->hwconfig = &vp3030_mantis_config;
133 break;
134 default:
135 mantis->hwconfig = &unknown_device;
136 break;
137 }
138}
139
140int mantis_core_init(struct mantis_pci *mantis)
141{
142 int err = 0;
143
144 mantis_load_config(mantis);
145 dprintk(verbose, MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
146 mantis->hwconfig->model_name, mantis->hwconfig->dev_type,
147 mantis->pdev->bus->number, PCI_SLOT(mantis->pdev->devfn), PCI_FUNC(mantis->pdev->devfn));
148 dprintk(verbose, MANTIS_ERROR, 0, " Mantis Rev %d [%04x:%04x], ",
149 mantis->revision,
150 mantis->subsystem_vendor, mantis->subsystem_device);
151 dprintk(verbose, MANTIS_ERROR, 0,
152 "irq: %d, latency: %d\n memory: 0x%lx, mmio: 0x%p\n",
153 mantis->pdev->irq, mantis->latency,
154 mantis->mantis_addr, mantis->mantis_mmio);
155
156 err = mantis_i2c_init(mantis);
157 if (err < 0) {
158 dprintk(verbose, MANTIS_ERROR, 1, "Mantis I2C init failed");
159 return err;
160 }
161 err = get_mac_address(mantis);
162 if (err < 0) {
163 dprintk(verbose, MANTIS_ERROR, 1, "get MAC address failed");
164 return err;
165 }
166 err = mantis_dma_init(mantis);
167 if (err < 0) {
168 dprintk(verbose, MANTIS_ERROR, 1, "Mantis DMA init failed");
169 return err;
170 }
171 err = mantis_dvb_init(mantis);
172 if (err < 0) {
173 dprintk(verbose, MANTIS_DEBUG, 1, "Mantis DVB init failed");
174 return err;
175 }
176 err = mantis_uart_init(mantis);
177 if (err < 0) {
178 dprintk(verbose, MANTIS_DEBUG, 1, "Mantis UART init failed");
179 return err;
180 }
181
182 return 0;
183}
184
185int mantis_core_exit(struct mantis_pci *mantis)
186{
187 mantis_dma_stop(mantis);
188 dprintk(verbose, MANTIS_ERROR, 1, "DMA engine stopping");
189
190 mantis_uart_exit(mantis);
191 dprintk(verbose, MANTIS_ERROR, 1, "UART exit failed");
192
193 if (mantis_dma_exit(mantis) < 0)
194 dprintk(verbose, MANTIS_ERROR, 1, "DMA exit failed");
195 if (mantis_dvb_exit(mantis) < 0)
196 dprintk(verbose, MANTIS_ERROR, 1, "DVB exit failed");
197 if (mantis_i2c_exit(mantis) < 0)
198 dprintk(verbose, MANTIS_ERROR, 1, "I2C adapter delete.. failed");
199
200 return 0;
201}
202
203/* Turn the given bit on or off. */
204void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
205{
206 u32 cur;
207
208 cur = mmread(MANTIS_GPIF_ADDR);
209 if (value)
210 mantis->gpio_status = cur | (1 << bitpos);
211 else
212 mantis->gpio_status = cur & (~(1 << bitpos));
213
214 mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
215 mmwrite(0x00, MANTIS_GPIF_DOUT);
216 udelay(100);
217}
218
219/* direction = 0 , no CI passthrough ; 1 , CI passthrough */
220void mantis_set_direction(struct mantis_pci *mantis, int direction)
221{
222 u32 reg;
223
224 reg = mmread(0x28);
225 dprintk(verbose, MANTIS_DEBUG, 1, "TS direction setup");
226 if (direction == 0x01) {
227 /* to CI */
228 reg |= 0x04;
229 mmwrite(reg, 0x28);
230 reg &= 0xff - 0x04;
231 mmwrite(reg, 0x28);
232 } else {
233 reg &= 0xff - 0x04;
234 mmwrite(reg, 0x28);
235 reg |= 0x04;
236 mmwrite(reg, 0x28);
237 }
238}
diff --git a/drivers/media/dvb/mantis/mantis_core.h b/drivers/media/dvb/mantis/mantis_core.h
new file mode 100644
index 000000000000..833ee42e694e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_core.h
@@ -0,0 +1,57 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_CORE_H
22#define __MANTIS_CORE_H
23
24#include "mantis_common.h"
25
26
27#define FE_TYPE_SAT 0
28#define FE_TYPE_CAB 1
29#define FE_TYPE_TER 2
30
31#define FE_TYPE_TS204 0
32#define FE_TYPE_TS188 1
33
34
35struct vendorname {
36 u8 *sub_vendor_name;
37 u32 sub_vendor_id;
38};
39
40struct devicetype {
41 u8 *sub_device_name;
42 u32 sub_device_id;
43 u8 device_type;
44 u32 type_flags;
45};
46
47
48extern int mantis_dma_init(struct mantis_pci *mantis);
49extern int mantis_dma_exit(struct mantis_pci *mantis);
50extern void mantis_dma_start(struct mantis_pci *mantis);
51extern void mantis_dma_stop(struct mantis_pci *mantis);
52extern int mantis_i2c_init(struct mantis_pci *mantis);
53extern int mantis_i2c_exit(struct mantis_pci *mantis);
54extern int mantis_core_init(struct mantis_pci *mantis);
55extern int mantis_core_exit(struct mantis_pci *mantis);
56
57#endif /* __MANTIS_CORE_H */
diff --git a/drivers/media/dvb/mantis/mantis_dma.c b/drivers/media/dvb/mantis/mantis_dma.c
new file mode 100644
index 000000000000..46202a4012aa
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dma.c
@@ -0,0 +1,256 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/kernel.h>
22#include <asm/page.h>
23#include <linux/vmalloc.h>
24#include <linux/pci.h>
25
26#include <asm/irq.h>
27#include <linux/signal.h>
28#include <linux/sched.h>
29#include <linux/interrupt.h>
30
31#include "dmxdev.h"
32#include "dvbdev.h"
33#include "dvb_demux.h"
34#include "dvb_frontend.h"
35#include "dvb_net.h"
36
37#include "mantis_common.h"
38#include "mantis_reg.h"
39#include "mantis_dma.h"
40
41#define RISC_WRITE (0x01 << 28)
42#define RISC_JUMP (0x07 << 28)
43#define RISC_IRQ (0x01 << 24)
44
45#define RISC_STATUS(status) ((((~status) & 0x0f) << 20) | ((status & 0x0f) << 16))
46#define RISC_FLUSH() (mantis->risc_pos = 0)
47#define RISC_INSTR(opcode) (mantis->risc_cpu[mantis->risc_pos++] = cpu_to_le32(opcode))
48
49#define MANTIS_BUF_SIZE (64 * 1024)
50#define MANTIS_BLOCK_BYTES (MANTIS_BUF_SIZE >> 4)
51#define MANTIS_BLOCK_COUNT (1 << 4)
52#define MANTIS_RISC_SIZE PAGE_SIZE
53
54int mantis_dma_exit(struct mantis_pci *mantis)
55{
56 if (mantis->buf_cpu) {
57 dprintk(MANTIS_ERROR, 1,
58 "DMA=0x%lx cpu=0x%p size=%d",
59 (unsigned long) mantis->buf_dma,
60 mantis->buf_cpu,
61 MANTIS_BUF_SIZE);
62
63 pci_free_consistent(mantis->pdev, MANTIS_BUF_SIZE,
64 mantis->buf_cpu, mantis->buf_dma);
65
66 mantis->buf_cpu = NULL;
67 }
68 if (mantis->risc_cpu) {
69 dprintk(MANTIS_ERROR, 1,
70 "RISC=0x%lx cpu=0x%p size=%lx",
71 (unsigned long) mantis->risc_dma,
72 mantis->risc_cpu,
73 MANTIS_RISC_SIZE);
74
75 pci_free_consistent(mantis->pdev, MANTIS_RISC_SIZE,
76 mantis->risc_cpu, mantis->risc_dma);
77
78 mantis->risc_cpu = NULL;
79 }
80
81 return 0;
82}
83EXPORT_SYMBOL_GPL(mantis_dma_exit);
84
85static inline int mantis_alloc_buffers(struct mantis_pci *mantis)
86{
87 if (!mantis->buf_cpu) {
88 mantis->buf_cpu = pci_alloc_consistent(mantis->pdev,
89 MANTIS_BUF_SIZE,
90 &mantis->buf_dma);
91 if (!mantis->buf_cpu) {
92 dprintk(MANTIS_ERROR, 1,
93 "DMA buffer allocation failed");
94
95 goto err;
96 }
97 dprintk(MANTIS_ERROR, 1,
98 "DMA=0x%lx cpu=0x%p size=%d",
99 (unsigned long) mantis->buf_dma,
100 mantis->buf_cpu, MANTIS_BUF_SIZE);
101 }
102 if (!mantis->risc_cpu) {
103 mantis->risc_cpu = pci_alloc_consistent(mantis->pdev,
104 MANTIS_RISC_SIZE,
105 &mantis->risc_dma);
106
107 if (!mantis->risc_cpu) {
108 dprintk(MANTIS_ERROR, 1,
109 "RISC program allocation failed");
110
111 mantis_dma_exit(mantis);
112
113 goto err;
114 }
115 dprintk(MANTIS_ERROR, 1,
116 "RISC=0x%lx cpu=0x%p size=%lx",
117 (unsigned long) mantis->risc_dma,
118 mantis->risc_cpu, MANTIS_RISC_SIZE);
119 }
120
121 return 0;
122err:
123 dprintk(MANTIS_ERROR, 1, "Out of memory (?) .....");
124 return -ENOMEM;
125}
126
127static inline int mantis_calc_lines(struct mantis_pci *mantis)
128{
129 mantis->line_bytes = MANTIS_BLOCK_BYTES;
130 mantis->line_count = MANTIS_BLOCK_COUNT;
131
132 while (mantis->line_bytes > 4095) {
133 mantis->line_bytes >>= 1;
134 mantis->line_count <<= 1;
135 }
136
137 dprintk(MANTIS_DEBUG, 1, "Mantis RISC block bytes=[%d], line bytes=[%d], line count=[%d]",
138 MANTIS_BLOCK_BYTES, mantis->line_bytes, mantis->line_count);
139
140 if (mantis->line_count > 255) {
141 dprintk(MANTIS_ERROR, 1, "Buffer size error");
142 return -EINVAL;
143 }
144
145 return 0;
146}
147
148int mantis_dma_init(struct mantis_pci *mantis)
149{
150 int err = 0;
151
152 dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
153 if (mantis_alloc_buffers(mantis) < 0) {
154 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
155
156 /* Stop RISC Engine */
157 mmwrite(0, MANTIS_DMA_CTL);
158
159 goto err;
160 }
161 err = mantis_calc_lines(mantis);
162 if (err < 0) {
163 dprintk(MANTIS_ERROR, 1, "Mantis calc lines failed");
164
165 goto err;
166 }
167
168 return 0;
169err:
170 return err;
171}
172EXPORT_SYMBOL_GPL(mantis_dma_init);
173
174static inline void mantis_risc_program(struct mantis_pci *mantis)
175{
176 u32 buf_pos = 0;
177 u32 line;
178
179 dprintk(MANTIS_DEBUG, 1, "Mantis create RISC program");
180 RISC_FLUSH();
181
182 dprintk(MANTIS_DEBUG, 1, "risc len lines %u, bytes per line %u",
183 mantis->line_count, mantis->line_bytes);
184
185 for (line = 0; line < mantis->line_count; line++) {
186 dprintk(MANTIS_DEBUG, 1, "RISC PROG line=[%d]", line);
187 if (!(buf_pos % MANTIS_BLOCK_BYTES)) {
188 RISC_INSTR(RISC_WRITE |
189 RISC_IRQ |
190 RISC_STATUS(((buf_pos / MANTIS_BLOCK_BYTES) +
191 (MANTIS_BLOCK_COUNT - 1)) %
192 MANTIS_BLOCK_COUNT) |
193 mantis->line_bytes);
194 } else {
195 RISC_INSTR(RISC_WRITE | mantis->line_bytes);
196 }
197 RISC_INSTR(mantis->buf_dma + buf_pos);
198 buf_pos += mantis->line_bytes;
199 }
200 RISC_INSTR(RISC_JUMP);
201 RISC_INSTR(mantis->risc_dma);
202}
203
204void mantis_dma_start(struct mantis_pci *mantis)
205{
206 dprintk(MANTIS_DEBUG, 1, "Mantis Start DMA engine");
207
208 mantis_risc_program(mantis);
209 mmwrite(mantis->risc_dma, MANTIS_RISC_START);
210 mmwrite(mmread(MANTIS_GPIF_ADDR) | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
211
212 mmwrite(0, MANTIS_DMA_CTL);
213 mantis->last_block = mantis->finished_block = 0;
214
215 mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_RISCI, MANTIS_INT_MASK);
216
217 mmwrite(MANTIS_FIFO_EN | MANTIS_DCAP_EN
218 | MANTIS_RISC_EN, MANTIS_DMA_CTL);
219
220}
221
222void mantis_dma_stop(struct mantis_pci *mantis)
223{
224 u32 stat = 0, mask = 0;
225
226 stat = mmread(MANTIS_INT_STAT);
227 mask = mmread(MANTIS_INT_MASK);
228 dprintk(MANTIS_DEBUG, 1, "Mantis Stop DMA engine");
229
230 mmwrite((mmread(MANTIS_GPIF_ADDR) & (~(MANTIS_GPIF_HIFRDWRN))), MANTIS_GPIF_ADDR);
231
232 mmwrite((mmread(MANTIS_DMA_CTL) & ~(MANTIS_FIFO_EN |
233 MANTIS_DCAP_EN |
234 MANTIS_RISC_EN)), MANTIS_DMA_CTL);
235
236 mmwrite(mmread(MANTIS_INT_STAT), MANTIS_INT_STAT);
237
238 mmwrite(mmread(MANTIS_INT_MASK) & ~(MANTIS_INT_RISCI |
239 MANTIS_INT_RISCEN), MANTIS_INT_MASK);
240}
241
242
243void mantis_dma_xfer(unsigned long data)
244{
245 struct mantis_pci *mantis = (struct mantis_pci *) data;
246 struct mantis_hwconfig *config = mantis->hwconfig;
247
248 while (mantis->last_block != mantis->finished_block) {
249 dprintk(MANTIS_DEBUG, 1, "last block=[%d] finished block=[%d]",
250 mantis->last_block, mantis->finished_block);
251
252 (config->ts_size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
253 (&mantis->demux, &mantis->buf_cpu[mantis->last_block * MANTIS_BLOCK_BYTES], MANTIS_BLOCK_BYTES);
254 mantis->last_block = (mantis->last_block + 1) % MANTIS_BLOCK_COUNT;
255 }
256}
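Working the constants above through mantis_calc_lines() and mantis_risc_program() gives a concrete picture of the ring the DMA engine runs; nothing below is assumed beyond the #defines and code in this file plus the finished_block extraction already shown in the interrupt handlers:

/*
 * Worked example of the layout produced above:
 *
 *   MANTIS_BUF_SIZE    = 64 KiB
 *   MANTIS_BLOCK_COUNT = 16 blocks of MANTIS_BLOCK_BYTES = 4096 bytes
 *   mantis_calc_lines(): 4096 > 4095, so line_bytes is halved once,
 *                        giving 32 lines of 2048 bytes
 *
 * mantis_risc_program() therefore emits two writes per block; for block N
 * (N = 0..15) they are:
 *
 *   RISC_WRITE | RISC_IRQ | RISC_STATUS((N + 15) % 16) | 2048
 *   buf_dma + N * 4096
 *   RISC_WRITE | 2048
 *   buf_dma + N * 4096 + 2048
 *
 * followed, after block 15, by RISC_JUMP back to risc_dma so the program
 * loops forever.  The status code carried on the first write of block N is
 * the index of the block that has just been filled; the IRQ handlers
 * recover it as (stat & MANTIS_INT_RISCSTAT) >> 28, store it in
 * finished_block, and mantis_dma_xfer() then pushes blocks into the demux,
 * 4096 bytes at a time, starting at last_block until it catches up with
 * finished_block.
 */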
diff --git a/drivers/media/dvb/mantis/mantis_dma.h b/drivers/media/dvb/mantis/mantis_dma.h
new file mode 100644
index 000000000000..6be00fa82094
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dma.h
@@ -0,0 +1,30 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_DMA_H
22#define __MANTIS_DMA_H
23
24extern int mantis_dma_init(struct mantis_pci *mantis);
25extern int mantis_dma_exit(struct mantis_pci *mantis);
26extern void mantis_dma_start(struct mantis_pci *mantis);
27extern void mantis_dma_stop(struct mantis_pci *mantis);
28extern void mantis_dma_xfer(unsigned long data);
29
30#endif /* __MANTIS_DMA_H */
diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c
new file mode 100644
index 000000000000..99d82eec3b03
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dvb.c
@@ -0,0 +1,296 @@
1/*
2 Mantis PCI bridge driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19
20#include <linux/kernel.h>
21#include <linux/bitops.h>
22
23#include <linux/signal.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/i2c.h>
28
29#include "dmxdev.h"
30#include "dvbdev.h"
31#include "dvb_demux.h"
32#include "dvb_frontend.h"
33#include "dvb_net.h"
34
35#include "mantis_common.h"
36#include "mantis_dma.h"
37#include "mantis_ca.h"
38#include "mantis_ioc.h"
39#include "mantis_dvb.h"
40
41DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
42
43int mantis_frontend_power(struct mantis_pci *mantis, enum mantis_power power)
44{
45 struct mantis_hwconfig *config = mantis->hwconfig;
46
47 switch (power) {
48 case POWER_ON:
49 dprintk(MANTIS_DEBUG, 1, "Power ON");
50 gpio_set_bits(mantis, config->power, POWER_ON);
51 msleep(100);
52 gpio_set_bits(mantis, config->power, POWER_ON);
53 msleep(100);
54 break;
55
56 case POWER_OFF:
57 dprintk(MANTIS_DEBUG, 1, "Power OFF");
58 gpio_set_bits(mantis, config->power, POWER_OFF);
59 msleep(100);
60 break;
61
62 default:
63 dprintk(MANTIS_DEBUG, 1, "Unknown state <%02x>", power);
64 return -1;
65 }
66
67 return 0;
68}
69EXPORT_SYMBOL_GPL(mantis_frontend_power);
70
71void mantis_frontend_soft_reset(struct mantis_pci *mantis)
72{
73 struct mantis_hwconfig *config = mantis->hwconfig;
74
75 dprintk(MANTIS_DEBUG, 1, "Frontend RESET");
76 gpio_set_bits(mantis, config->reset, 0);
77 msleep(100);
78 gpio_set_bits(mantis, config->reset, 0);
79 msleep(100);
80 gpio_set_bits(mantis, config->reset, 1);
81 msleep(100);
82 gpio_set_bits(mantis, config->reset, 1);
83 msleep(100);
84
85 return;
86}
87EXPORT_SYMBOL_GPL(mantis_frontend_soft_reset);
88
89static int mantis_frontend_shutdown(struct mantis_pci *mantis)
90{
91 int err;
92
93 mantis_frontend_soft_reset(mantis);
94 err = mantis_frontend_power(mantis, POWER_OFF);
95 if (err != 0) {
96 dprintk(MANTIS_ERROR, 1, "Frontend POWER OFF failed! <%d>", err);
97 return 1;
98 }
99
100 return 0;
101}
102
103static int mantis_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
104{
105 struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
106 struct mantis_pci *mantis = dvbdmx->priv;
107
108 dprintk(MANTIS_DEBUG, 1, "Mantis DVB Start feed");
109 if (!dvbdmx->dmx.frontend) {
110 dprintk(MANTIS_DEBUG, 1, "no frontend ?");
111 return -EINVAL;
112 }
113
114 mantis->feeds++;
115 dprintk(MANTIS_DEBUG, 1, "mantis start feed, feeds=%d", mantis->feeds);
116
117 if (mantis->feeds == 1) {
118 dprintk(MANTIS_DEBUG, 1, "mantis start feed & dma");
119 mantis_dma_start(mantis);
120 }
121
122 return mantis->feeds;
123}
124
125static int mantis_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
126{
127 struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
128 struct mantis_pci *mantis = dvbdmx->priv;
129
130 dprintk(MANTIS_DEBUG, 1, "Mantis DVB Stop feed");
131 if (!dvbdmx->dmx.frontend) {
132 dprintk(MANTIS_DEBUG, 1, "no frontend ?");
133 return -EINVAL;
134 }
135
136 mantis->feeds--;
137 if (mantis->feeds == 0) {
138 dprintk(MANTIS_DEBUG, 1, "mantis stop feed and dma");
139 mantis_dma_stop(mantis);
140 }
141
142 return 0;
143}
144
145int __devinit mantis_dvb_init(struct mantis_pci *mantis)
146{
147 struct mantis_hwconfig *config = mantis->hwconfig;
148 int result = -1;
149
150 dprintk(MANTIS_DEBUG, 1, "dvb_register_adapter");
151
152 result = dvb_register_adapter(&mantis->dvb_adapter,
153 "Mantis DVB adapter",
154 THIS_MODULE,
155 &mantis->pdev->dev,
156 adapter_nr);
157
158 if (result < 0) {
159
160 dprintk(MANTIS_ERROR, 1, "Error registering adapter");
161 return -ENODEV;
162 }
163
164 mantis->dvb_adapter.priv = mantis;
165 mantis->demux.dmx.capabilities = DMX_TS_FILTERING |
166 DMX_SECTION_FILTERING |
167 DMX_MEMORY_BASED_FILTERING;
168
169 mantis->demux.priv = mantis;
170 mantis->demux.filternum = 256;
171 mantis->demux.feednum = 256;
172 mantis->demux.start_feed = mantis_dvb_start_feed;
173 mantis->demux.stop_feed = mantis_dvb_stop_feed;
174 mantis->demux.write_to_decoder = NULL;
175
176 dprintk(MANTIS_DEBUG, 1, "dvb_dmx_init");
177 result = dvb_dmx_init(&mantis->demux);
178 if (result < 0) {
179 dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
180
181 goto err0;
182 }
183
184 mantis->dmxdev.filternum = 256;
185 mantis->dmxdev.demux = &mantis->demux.dmx;
186 mantis->dmxdev.capabilities = 0;
187 dprintk(MANTIS_DEBUG, 1, "dvb_dmxdev_init");
188
189 result = dvb_dmxdev_init(&mantis->dmxdev, &mantis->dvb_adapter);
190 if (result < 0) {
191
192 dprintk(MANTIS_ERROR, 1, "dvb_dmxdev_init failed, ERROR=%d", result);
193 goto err1;
194 }
195
196 mantis->fe_hw.source = DMX_FRONTEND_0;
197 result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_hw);
198 if (result < 0) {
199
200 dprintk(MANTIS_ERROR, 1, "add_frontend (hw) failed, ERROR=%d", result);
201 goto err2;
202 }
203
204 mantis->fe_mem.source = DMX_MEMORY_FE;
205 result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_mem);
206 if (result < 0) {
207 dprintk(MANTIS_ERROR, 1, "add_frontend (mem) failed, ERROR=%d", result);
208 goto err3;
209 }
210
211 result = mantis->demux.dmx.connect_frontend(&mantis->demux.dmx, &mantis->fe_hw);
212 if (result < 0) {
213 dprintk(MANTIS_ERROR, 1, "connect_frontend failed, ERROR=%d", result);
214 goto err4;
215 }
216
217 dvb_net_init(&mantis->dvb_adapter, &mantis->dvbnet, &mantis->demux.dmx);
218 tasklet_init(&mantis->tasklet, mantis_dma_xfer, (unsigned long) mantis);
219 if (mantis->hwconfig) {
220 result = config->frontend_init(mantis, mantis->fe);
221 if (result < 0) {
222 dprintk(MANTIS_ERROR, 1, "!!! NO Frontends found !!!");
223 goto err5;
224 } else {
225 if (mantis->fe == NULL) {
226 dprintk(MANTIS_ERROR, 1, "FE <NULL>");
227 goto err5;
228 }
229
230 if (dvb_register_frontend(&mantis->dvb_adapter, mantis->fe)) {
231 dprintk(MANTIS_ERROR, 1, "ERROR: Frontend registration failed");
232
233 if (mantis->fe->ops.release)
234 mantis->fe->ops.release(mantis->fe);
235
236 mantis->fe = NULL;
237 goto err5;
238 }
239 }
240 }
241
242 return 0;
243
244 /* Error conditions .. */
245err5:
246 tasklet_kill(&mantis->tasklet);
247 dvb_net_release(&mantis->dvbnet);
248 dvb_unregister_frontend(mantis->fe);
249 dvb_frontend_detach(mantis->fe);
250err4:
251 mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
252
253err3:
254 mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
255
256err2:
257 dvb_dmxdev_release(&mantis->dmxdev);
258
259err1:
260 dvb_dmx_release(&mantis->demux);
261
262err0:
263 dvb_unregister_adapter(&mantis->dvb_adapter);
264
265 return result;
266}
267EXPORT_SYMBOL_GPL(mantis_dvb_init);
268
269int __devexit mantis_dvb_exit(struct mantis_pci *mantis)
270{
271 int err;
272
273 if (mantis->fe) {
274 /* mantis_ca_exit(mantis); */
275 err = mantis_frontend_shutdown(mantis);
276 if (err != 0)
277 dprintk(MANTIS_ERROR, 1, "Frontend exit while POWER ON! <%d>", err);
278 dvb_unregister_frontend(mantis->fe);
279 dvb_frontend_detach(mantis->fe);
280 }
281
282 tasklet_kill(&mantis->tasklet);
283 dvb_net_release(&mantis->dvbnet);
284
285 mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
286 mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
287
288 dvb_dmxdev_release(&mantis->dmxdev);
289 dvb_dmx_release(&mantis->demux);
290
291 dprintk(MANTIS_DEBUG, 1, "dvb_unregister_adapter");
292 dvb_unregister_adapter(&mantis->dvb_adapter);
293
294 return 0;
295}
296EXPORT_SYMBOL_GPL(mantis_dvb_exit);
diff --git a/drivers/media/dvb/mantis/mantis_dvb.h b/drivers/media/dvb/mantis/mantis_dvb.h
new file mode 100644
index 000000000000..464199db304e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dvb.h
@@ -0,0 +1,35 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_DVB_H
22#define __MANTIS_DVB_H
23
24enum mantis_power {
25 POWER_OFF = 0,
26 POWER_ON = 1
27};
28
29extern int mantis_frontend_power(struct mantis_pci *mantis, enum mantis_power power);
30extern void mantis_frontend_soft_reset(struct mantis_pci *mantis);
31
32extern int mantis_dvb_init(struct mantis_pci *mantis);
33extern int mantis_dvb_exit(struct mantis_pci *mantis);
34
35#endif /* __MANTIS_DVB_H */
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
new file mode 100644
index 000000000000..a7b369a439d6
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -0,0 +1,117 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/kernel.h>
22
23#include <linux/signal.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26
27#include "dmxdev.h"
28#include "dvbdev.h"
29#include "dvb_demux.h"
30#include "dvb_frontend.h"
31#include "dvb_net.h"
32
33#include "mantis_common.h"
34#include "mantis_link.h"
35#include "mantis_hif.h"
36#include "mantis_reg.h"
37
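/*
 * Host interface event manager: decode the GPIF status bits to report
 * CAM insertion/removal to dvb_ca_en50221 and to wake up waiters when a
 * smart-buffer operation completes.
 */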
38static void mantis_hifevm_work(struct work_struct *work)
39{
40 struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);
41 struct mantis_pci *mantis = ca->ca_priv;
42
43 u32 gpif_stat, gpif_mask;
44
45 gpif_stat = mmread(MANTIS_GPIF_STATUS);
46 gpif_mask = mmread(MANTIS_GPIF_IRQCFG);
47
48 if (gpif_stat & MANTIS_GPIF_DETSTAT) {
49 if (gpif_stat & MANTIS_CARD_PLUGIN) {
50 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): CAM Plugin", mantis->num);
51 mmwrite(0xdada0000, MANTIS_CARD_RESET);
52 mantis_event_cam_plugin(ca);
53 dvb_ca_en50221_camchange_irq(&ca->en50221,
54 0,
55 DVB_CA_EN50221_CAMCHANGE_INSERTED);
56 }
57 } else {
58 if (gpif_stat & MANTIS_CARD_PLUGOUT) {
59 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): CAM Unplug", mantis->num);
60 mmwrite(0xdada0000, MANTIS_CARD_RESET);
61 mantis_event_cam_unplug(ca);
62 dvb_ca_en50221_camchange_irq(&ca->en50221,
63 0,
64 DVB_CA_EN50221_CAMCHANGE_REMOVED);
65 }
66 }
67
68 if (mantis->gpif_status & MANTIS_GPIF_EXTIRQ)
69 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Ext IRQ", mantis->num);
70
71 if (mantis->gpif_status & MANTIS_SBUF_WSTO)
72 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Timeout", mantis->num);
73
74 if (mantis->gpif_status & MANTIS_GPIF_OTHERR)
75 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Alignment Error", mantis->num);
76
77 if (gpif_stat & MANTIS_SBUF_OVFLW)
78 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Overflow", mantis->num);
79
80 if (gpif_stat & MANTIS_GPIF_BRRDY)
81 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Read Ready", mantis->num);
82
83 if (gpif_stat & MANTIS_GPIF_INTSTAT)
84 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): GPIF IRQ", mantis->num);
85
86 if (gpif_stat & MANTIS_SBUF_EMPTY)
87 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Empty", mantis->num);
88
89 if (gpif_stat & MANTIS_SBUF_OPDONE) {
90 dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer operation complete", mantis->num);
91 ca->sbuf_status = MANTIS_SBUF_DATA_AVAIL;
92 ca->hif_event = MANTIS_SBUF_OPDONE;
93 wake_up(&ca->hif_opdone_wq);
94 }
95}
96
97int mantis_evmgr_init(struct mantis_ca *ca)
98{
99 struct mantis_pci *mantis = ca->ca_priv;
100
101 dprintk(MANTIS_DEBUG, 1, "Initializing Mantis Host I/F Event manager");
102 INIT_WORK(&ca->hif_evm_work, mantis_hifevm_work);
103 mantis_pcmcia_init(ca);
104 schedule_work(&ca->hif_evm_work);
105 mantis_hif_init(ca);
106 return 0;
107}
108
109void mantis_evmgr_exit(struct mantis_ca *ca)
110{
111 struct mantis_pci *mantis = ca->ca_priv;
112
113 dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
114 flush_scheduled_work();
115 mantis_hif_exit(ca);
116 mantis_pcmcia_exit(ca);
117}
diff --git a/drivers/media/dvb/mantis/mantis_hif.c b/drivers/media/dvb/mantis/mantis_hif.c
new file mode 100644
index 000000000000..7477dac628b4
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_hif.c
@@ -0,0 +1,240 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/kernel.h>
22#include <linux/signal.h>
23#include <linux/sched.h>
24
25#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/interrupt.h>
28
29#include "dmxdev.h"
30#include "dvbdev.h"
31#include "dvb_demux.h"
32#include "dvb_frontend.h"
33#include "dvb_net.h"
34
35#include "mantis_common.h"
36
37#include "mantis_hif.h"
38#include "mantis_link.h" /* temporary due to physical layer stuff */
39
40#include "mantis_reg.h"
41
42
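/*
 * Wait up to 500 ms for the event manager to flag completion of a
 * smart-buffer operation; the flag is cleared before returning.
 */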
43static int mantis_hif_sbuf_opdone_wait(struct mantis_ca *ca)
44{
45 struct mantis_pci *mantis = ca->ca_priv;
46 int rc = 0;
47
48 if (wait_event_timeout(ca->hif_opdone_wq,
49 ca->hif_event & MANTIS_SBUF_OPDONE,
50 msecs_to_jiffies(500)) == 0) {
51
52 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Smart buffer operation timeout !", mantis->num);
53 rc = -EREMOTEIO;
54 }
55 dprintk(MANTIS_DEBUG, 1, "Smart Buffer Operation complete");
56 ca->hif_event &= ~MANTIS_SBUF_OPDONE;
57 return rc;
58}
59
60static int mantis_hif_write_wait(struct mantis_ca *ca)
61{
62 struct mantis_pci *mantis = ca->ca_priv;
63 u32 opdone = 0, timeout = 0;
64 int rc = 0;
65
66 if (wait_event_timeout(ca->hif_write_wq,
67 mantis->gpif_status & MANTIS_GPIF_WRACK,
68 msecs_to_jiffies(500)) == 0) {
69
70 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Write ACK timed out !", mantis->num);
71 rc = -EREMOTEIO;
72 }
73 dprintk(MANTIS_DEBUG, 1, "Write Acknowledged");
74 mantis->gpif_status &= ~MANTIS_GPIF_WRACK;
75 while (!opdone) {
76 opdone = (mmread(MANTIS_GPIF_STATUS) & MANTIS_SBUF_OPDONE);
77 udelay(500);
78 timeout++;
79 if (timeout > 100) {
80 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Write operation timed out!", mantis->num);
81 rc = -ETIMEDOUT;
82 break;
83 }
84 }
85 dprintk(MANTIS_DEBUG, 1, "HIF Write success");
86 return rc;
87}
88
89
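/*
 * Attribute-memory read through the GPIF: program the burst address and
 * byte count, issue the read, wait for the smart-buffer completion and
 * return the byte from the top of MANTIS_GPIF_DIN.
 */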
90int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr)
91{
92 struct mantis_pci *mantis = ca->ca_priv;
93 u32 hif_addr = 0, data, count = 4;
94
95 dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Read", mantis->num);
96 mutex_lock(&ca->ca_lock);
97 hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
98 hif_addr &= ~MANTIS_GPIF_PCMCIAIOM;
99 hif_addr |= MANTIS_HIF_STATUS;
100 hif_addr |= addr;
101
102 mmwrite(hif_addr, MANTIS_GPIF_BRADDR);
103 mmwrite(count, MANTIS_GPIF_BRBYTES);
104 udelay(20);
105 mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
106
107 if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
108 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): GPIF Smart Buffer operation failed", mantis->num);
109 mutex_unlock(&ca->ca_lock);
110 return -EREMOTEIO;
111 }
112 data = mmread(MANTIS_GPIF_DIN);
113 mutex_unlock(&ca->ca_lock);
114 dprintk(MANTIS_DEBUG, 1, "Mem Read: 0x%02x", data);
115 return (data >> 24) & 0xff;
116}
117
118int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data)
119{
120 struct mantis_slot *slot = ca->slot;
121 struct mantis_pci *mantis = ca->ca_priv;
122 u32 hif_addr = 0;
123
124 dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Write", mantis->num);
125 mutex_lock(&ca->ca_lock);
126 hif_addr &= ~MANTIS_GPIF_HIFRDWRN;
127 hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
128 hif_addr &= ~MANTIS_GPIF_PCMCIAIOM;
129 hif_addr |= MANTIS_HIF_STATUS;
130 hif_addr |= addr;
131
132 mmwrite(slot->slave_cfg, MANTIS_GPIF_CFGSLA); /* Slot0 alone for now */
133 mmwrite(hif_addr, MANTIS_GPIF_ADDR);
134 mmwrite(data, MANTIS_GPIF_DOUT);
135
136 if (mantis_hif_write_wait(ca) != 0) {
137 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
138 mutex_unlock(&ca->ca_lock);
139 return -EREMOTEIO;
140 }
141 dprintk(MANTIS_DEBUG, 1, "Mem Write: (0x%02x to 0x%02x)", data, addr);
142 mutex_unlock(&ca->ca_lock);
143
144 return 0;
145}
146
147int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr)
148{
149 struct mantis_pci *mantis = ca->ca_priv;
150 u32 data, hif_addr = 0;
151
152 dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Read", mantis->num);
153 mutex_lock(&ca->ca_lock);
154 hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
155 hif_addr |= MANTIS_GPIF_PCMCIAIOM;
156 hif_addr |= MANTIS_HIF_STATUS;
157 hif_addr |= addr;
158
159 mmwrite(hif_addr, MANTIS_GPIF_BRADDR);
160 mmwrite(1, MANTIS_GPIF_BRBYTES);
161 udelay(20);
162 mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
163
164 if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
165 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
166 mutex_unlock(&ca->ca_lock);
167 return -EREMOTEIO;
168 }
169 data = mmread(MANTIS_GPIF_DIN);
170 dprintk(MANTIS_DEBUG, 1, "I/O Read: 0x%02x", data);
171 udelay(50);
172 mutex_unlock(&ca->ca_lock);
173
174 return (u8) data;
175}
176
177int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data)
178{
179 struct mantis_pci *mantis = ca->ca_priv;
180 u32 hif_addr = 0;
181
182 dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Write", mantis->num);
183 mutex_lock(&ca->ca_lock);
184 hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
185 hif_addr &= ~MANTIS_GPIF_HIFRDWRN;
186 hif_addr |= MANTIS_GPIF_PCMCIAIOM;
187 hif_addr |= MANTIS_HIF_STATUS;
188 hif_addr |= addr;
189
190 mmwrite(hif_addr, MANTIS_GPIF_ADDR);
191 mmwrite(data, MANTIS_GPIF_DOUT);
192
193 if (mantis_hif_write_wait(ca) != 0) {
194 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
195 mutex_unlock(&ca->ca_lock);
196 return -EREMOTEIO;
197 }
198 dprintk(MANTIS_DEBUG, 1, "I/O Write: (0x%02x to 0x%02x)", data, addr);
199 mutex_unlock(&ca->ca_lock);
200 udelay(50);
201
202 return 0;
203}
204
205int mantis_hif_init(struct mantis_ca *ca)
206{
207 struct mantis_slot *slot = ca->slot;
208 struct mantis_pci *mantis = ca->ca_priv;
209 u32 irqcfg;
210
211 slot[0].slave_cfg = 0x70773028;
212 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Initializing Mantis Host Interface", mantis->num);
213
214 mutex_lock(&ca->ca_lock);
215 irqcfg = mmread(MANTIS_GPIF_IRQCFG);
216 irqcfg = MANTIS_MASK_BRRDY |
217 MANTIS_MASK_WRACK |
218 MANTIS_MASK_EXTIRQ |
219 MANTIS_MASK_WSTO |
220 MANTIS_MASK_OTHERR |
221 MANTIS_MASK_OVFLW;
222
223 mmwrite(irqcfg, MANTIS_GPIF_IRQCFG);
224 mutex_unlock(&ca->ca_lock);
225
226 return 0;
227}
228
229void mantis_hif_exit(struct mantis_ca *ca)
230{
231 struct mantis_pci *mantis = ca->ca_priv;
232 u32 irqcfg;
233
234 dprintk(MANTIS_ERROR, 1, "Adapter(%d) Exiting Mantis Host Interface", mantis->num);
235 mutex_lock(&ca->ca_lock);
236 irqcfg = mmread(MANTIS_GPIF_IRQCFG);
237 irqcfg &= ~MANTIS_MASK_BRRDY;
238 mmwrite(irqcfg, MANTIS_GPIF_IRQCFG);
239 mutex_unlock(&ca->ca_lock);
240}
diff --git a/drivers/media/dvb/mantis/mantis_hif.h b/drivers/media/dvb/mantis/mantis_hif.h
new file mode 100644
index 000000000000..9094f9ed2362
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_hif.h
@@ -0,0 +1,29 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_HIF_H
22#define __MANTIS_HIF_H
23
24#define MANTIS_HIF_MEMRD 1
25#define MANTIS_HIF_MEMWR 2
26#define MANTIS_HIF_IOMRD 3
27#define MANTIS_HIF_IOMWR 4
28
29#endif /* __MANTIS_HIF_H */
diff --git a/drivers/media/dvb/mantis/mantis_i2c.c b/drivers/media/dvb/mantis/mantis_i2c.c
new file mode 100644
index 000000000000..7870bcf9689a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_i2c.c
@@ -0,0 +1,267 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <asm/io.h>
22#include <linux/ioport.h>
23#include <linux/pci.h>
24#include <linux/i2c.h>
25
26#include "dmxdev.h"
27#include "dvbdev.h"
28#include "dvb_demux.h"
29#include "dvb_frontend.h"
30#include "dvb_net.h"
31
32#include "mantis_common.h"
33#include "mantis_reg.h"
34#include "mantis_i2c.h"
35
36#define TRIALS 10000
37
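/*
 * Byte-at-a-time I2C read: each byte is transferred by writing a command
 * word to MANTIS_I2CDATA_CTL and polling MANTIS_INT_STAT for the
 * I2CDONE and I2CRACK bits (up to TRIALS iterations).
 */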
38static int mantis_i2c_read(struct mantis_pci *mantis, const struct i2c_msg *msg)
39{
40 u32 rxd, i, stat, trials;
41
42 dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <R>[ ",
43 __func__, msg->addr);
44
45 for (i = 0; i < msg->len; i++) {
46 rxd = (msg->addr << 25) | (1 << 24)
47 | MANTIS_I2C_RATE_3
48 | MANTIS_I2C_STOP
49 | MANTIS_I2C_PGMODE;
50
51 if (i == (msg->len - 1))
52 rxd &= ~MANTIS_I2C_STOP;
53
54 mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
55 mmwrite(rxd, MANTIS_I2CDATA_CTL);
56
57 /* wait for xfer completion */
58 for (trials = 0; trials < TRIALS; trials++) {
59 stat = mmread(MANTIS_INT_STAT);
60 if (stat & MANTIS_INT_I2CDONE)
61 break;
62 }
63
64 dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
65
66 /* wait for xfer completion */
67 for (trials = 0; trials < TRIALS; trials++) {
68 stat = mmread(MANTIS_INT_STAT);
69 if (stat & MANTIS_INT_I2CRACK)
70 break;
71 }
72
73 dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
74
75 rxd = mmread(MANTIS_I2CDATA_CTL);
76 msg->buf[i] = (u8)((rxd >> 8) & 0xFF);
77 dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
78 }
79 dprintk(MANTIS_INFO, 0, "]\n");
80
81 return 0;
82}
83
84static int mantis_i2c_write(struct mantis_pci *mantis, const struct i2c_msg *msg)
85{
86 int i;
87 u32 txd = 0, stat, trials;
88
89 dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <W>[ ",
90 __func__, msg->addr);
91
92 for (i = 0; i < msg->len; i++) {
93 dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
94 txd = (msg->addr << 25) | (msg->buf[i] << 8)
95 | MANTIS_I2C_RATE_3
96 | MANTIS_I2C_STOP
97 | MANTIS_I2C_PGMODE;
98
99 if (i == (msg->len - 1))
100 txd &= ~MANTIS_I2C_STOP;
101
102 mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
103 mmwrite(txd, MANTIS_I2CDATA_CTL);
104
105 /* wait for xfer completion */
106 for (trials = 0; trials < TRIALS; trials++) {
107 stat = mmread(MANTIS_INT_STAT);
108 if (stat & MANTIS_INT_I2CDONE)
109 break;
110 }
111
112 dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
113
114 /* wait for xfer completion */
115 for (trials = 0; trials < TRIALS; trials++) {
116 stat = mmread(MANTIS_INT_STAT);
117 if (stat & MANTIS_INT_I2CRACK)
118 break;
119 }
120
121 dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
122 }
123 dprintk(MANTIS_INFO, 0, "]\n");
124
125 return 0;
126}
127
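/*
 * i2c_algorithm master_xfer: on boards with MANTIS_BYTE_MODE a
 * single-byte write followed by a single-byte read is collapsed into one
 * combined register transaction; everything else falls back to the plain
 * read/write helpers above.
 */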
128static int mantis_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
129{
130 int ret = 0, i = 0, trials;
131 u32 stat, data, txd;
132 struct mantis_pci *mantis;
133 struct mantis_hwconfig *config;
134
135 mantis = i2c_get_adapdata(adapter);
136 BUG_ON(!mantis);
137 config = mantis->hwconfig;
138 BUG_ON(!config);
139
140 dprintk(MANTIS_DEBUG, 1, "Messages:%d", num);
141 mutex_lock(&mantis->i2c_lock);
142
143 while (i < num) {
144 /* Byte MODE */
145 if ((config->i2c_mode & MANTIS_BYTE_MODE) &&
146 ((i + 1) < num) &&
147 (msgs[i].len < 2) &&
148 (msgs[i + 1].len < 2) &&
149 (msgs[i + 1].flags & I2C_M_RD)) {
150
151 dprintk(MANTIS_DEBUG, 0, " Byte MODE:\n");
152
153 /* Read operation */
154 txd = msgs[i].addr << 25 | (0x1 << 24)
155 | (msgs[i].buf[0] << 16)
156 | MANTIS_I2C_RATE_3;
157
158 mmwrite(txd, MANTIS_I2CDATA_CTL);
159 /* wait for xfer completion */
160 for (trials = 0; trials < TRIALS; trials++) {
161 stat = mmread(MANTIS_INT_STAT);
162 if (stat & MANTIS_INT_I2CDONE)
163 break;
164 }
165
166 /* check for xfer completion */
167 if (stat & MANTIS_INT_I2CDONE) {
168 /* check xfer was acknowledged */
169 if (stat & MANTIS_INT_I2CRACK) {
170 data = mmread(MANTIS_I2CDATA_CTL);
171 msgs[i + 1].buf[0] = (data >> 8) & 0xff;
172 dprintk(MANTIS_DEBUG, 0, " Byte <%d> RXD=0x%02x [%02x]\n", 0x0, data, msgs[i + 1].buf[0]);
173 } else {
174 /* I/O error */
175 dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
176 ret = -EIO;
177 break;
178 }
179 } else {
180 /* I/O error */
181 dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
182 ret = -EIO;
183 break;
184 }
185 i += 2; /* Write/Read operation in one go */
186 }
187
188 if (i < num) {
189 if (msgs[i].flags & I2C_M_RD)
190 ret = mantis_i2c_read(mantis, &msgs[i]);
191 else
192 ret = mantis_i2c_write(mantis, &msgs[i]);
193
194 i++;
195 if (ret < 0)
196 goto bail_out;
197 }
198
199 }
200
201 mutex_unlock(&mantis->i2c_lock);
202
203 return num;
204
205bail_out:
206 mutex_unlock(&mantis->i2c_lock);
207 return ret;
208}
209
210static u32 mantis_i2c_func(struct i2c_adapter *adapter)
211{
212 return I2C_FUNC_SMBUS_EMUL;
213}
214
215static struct i2c_algorithm mantis_algo = {
216 .master_xfer = mantis_i2c_xfer,
217 .functionality = mantis_i2c_func,
218};
219
220int __devinit mantis_i2c_init(struct mantis_pci *mantis)
221{
222 u32 intstat, intmask;
223 struct i2c_adapter *i2c_adapter = &mantis->adapter;
224 struct pci_dev *pdev = mantis->pdev;
225
226 init_waitqueue_head(&mantis->i2c_wq);
227 mutex_init(&mantis->i2c_lock);
228 strncpy(i2c_adapter->name, "Mantis I2C", sizeof(i2c_adapter->name));
229 i2c_set_adapdata(i2c_adapter, mantis);
230
231 i2c_adapter->owner = THIS_MODULE;
232 i2c_adapter->class = I2C_CLASS_TV_DIGITAL;
233 i2c_adapter->algo = &mantis_algo;
234 i2c_adapter->algo_data = NULL;
235 i2c_adapter->timeout = 500;
236 i2c_adapter->retries = 3;
237 i2c_adapter->dev.parent = &pdev->dev;
238
239 mantis->i2c_rc = i2c_add_adapter(i2c_adapter);
240 if (mantis->i2c_rc < 0)
241 return mantis->i2c_rc;
242
243 dprintk(MANTIS_DEBUG, 1, "Initializing I2C ..");
244
245 intstat = mmread(MANTIS_INT_STAT);
246 intmask = mmread(MANTIS_INT_MASK);
247 mmwrite(intstat, MANTIS_INT_STAT);
248 dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
249 intmask = mmread(MANTIS_INT_MASK);
250 mmwrite((intmask & ~MANTIS_INT_I2CDONE), MANTIS_INT_MASK);
251
252 return 0;
253}
254EXPORT_SYMBOL_GPL(mantis_i2c_init);
255
256int mantis_i2c_exit(struct mantis_pci *mantis)
257{
258 u32 intmask;
259
260 dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
261 intmask = mmread(MANTIS_INT_MASK);
262 mmwrite((intmask & ~MANTIS_INT_I2CDONE), MANTIS_INT_MASK);
263
264 dprintk(MANTIS_DEBUG, 1, "Removing I2C adapter");
265 return i2c_del_adapter(&mantis->adapter);
266}
267EXPORT_SYMBOL_GPL(mantis_i2c_exit);
diff --git a/drivers/media/dvb/mantis/mantis_i2c.h b/drivers/media/dvb/mantis/mantis_i2c.h
new file mode 100644
index 000000000000..1342df2faed8
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_i2c.h
@@ -0,0 +1,30 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_I2C_H
22#define __MANTIS_I2C_H
23
24#define I2C_STOP (1 << 0)
25#define I2C_READ (1 << 1)
26
27extern int mantis_i2c_init(struct mantis_pci *mantis);
28extern int mantis_i2c_exit(struct mantis_pci *mantis);
29
30#endif /* __MANTIS_I2C_H */
diff --git a/drivers/media/dvb/mantis/mantis_input.c b/drivers/media/dvb/mantis/mantis_input.c
new file mode 100644
index 000000000000..6a9df779441f
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_input.c
@@ -0,0 +1,148 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/input.h>
22#include <media/ir-common.h>
23#include <linux/pci.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "mantis_common.h"
32#include "mantis_reg.h"
33#include "mantis_uart.h"
34
35static struct ir_scancode mantis_ir_table[] = {
36 { 0x29, KEY_POWER },
37 { 0x28, KEY_FAVORITES },
38 { 0x30, KEY_TEXT },
39 { 0x17, KEY_INFO }, /* Preview */
40 { 0x23, KEY_EPG },
41 { 0x3b, KEY_F22 }, /* Record List */
42 { 0x3c, KEY_1 },
43 { 0x3e, KEY_2 },
44 { 0x39, KEY_3 },
45 { 0x36, KEY_4 },
46 { 0x22, KEY_5 },
47 { 0x20, KEY_6 },
48 { 0x32, KEY_7 },
49 { 0x26, KEY_8 },
50 { 0x24, KEY_9 },
51 { 0x2a, KEY_0 },
52
53 { 0x33, KEY_CANCEL },
54 { 0x2c, KEY_BACK },
55 { 0x15, KEY_CLEAR },
56 { 0x3f, KEY_TAB },
57 { 0x10, KEY_ENTER },
58 { 0x14, KEY_UP },
59 { 0x0d, KEY_RIGHT },
60 { 0x0e, KEY_DOWN },
61 { 0x11, KEY_LEFT },
62
63 { 0x21, KEY_VOLUMEUP },
64 { 0x35, KEY_VOLUMEDOWN },
65 { 0x3d, KEY_CHANNELDOWN },
66 { 0x3a, KEY_CHANNELUP },
67 { 0x2e, KEY_RECORD },
68 { 0x2b, KEY_PLAY },
69 { 0x13, KEY_PAUSE },
70 { 0x25, KEY_STOP },
71
72 { 0x1f, KEY_REWIND },
73 { 0x2d, KEY_FASTFORWARD },
74 { 0x1e, KEY_PREVIOUS }, /* Replay |< */
75 { 0x1d, KEY_NEXT }, /* Skip >| */
76
77 { 0x0b, KEY_CAMERA }, /* Capture */
78 { 0x0f, KEY_LANGUAGE }, /* SAP */
79 { 0x18, KEY_MODE }, /* PIP */
80 { 0x12, KEY_ZOOM }, /* Full screen */
81 { 0x1c, KEY_SUBTITLE },
82 { 0x2f, KEY_MUTE },
83 { 0x16, KEY_F20 }, /* L/R */
84 { 0x38, KEY_F21 }, /* Hibernate */
85
86 { 0x37, KEY_SWITCHVIDEOMODE }, /* A/V */
87 { 0x31, KEY_AGAIN }, /* Recall */
88 { 0x1a, KEY_KPPLUS }, /* Zoom+ */
89 { 0x19, KEY_KPMINUS }, /* Zoom- */
90 { 0x27, KEY_RED },
91 { 0x0C, KEY_GREEN },
92 { 0x01, KEY_YELLOW },
93 { 0x00, KEY_BLUE },
94};
95
96struct ir_scancode_table ir_mantis = {
97 .scan = mantis_ir_table,
98 .size = ARRAY_SIZE(mantis_ir_table),
99};
100EXPORT_SYMBOL_GPL(ir_mantis);
101
102int mantis_input_init(struct mantis_pci *mantis)
103{
104 struct input_dev *rc;
105 struct ir_input_state rc_state;
106 char name[80], dev[80];
107 int err;
108
109 rc = input_allocate_device();
110 if (!rc) {
111 dprintk(MANTIS_ERROR, 1, "Input device allocate failed");
112 return -ENOMEM;
113 }
114
115 sprintf(name, "Mantis %s IR receiver", mantis->hwconfig->model_name);
116 sprintf(dev, "pci-%s/ir0", pci_name(mantis->pdev));
117
118 rc->name = name;
119 rc->phys = dev;
120
121 ir_input_init(rc, &rc_state, IR_TYPE_OTHER);
122
123 rc->id.bustype = BUS_PCI;
124 rc->id.vendor = mantis->vendor_id;
125 rc->id.product = mantis->device_id;
126 rc->id.version = 1;
127 rc->dev.parent = &mantis->pdev->dev;
128
129 err = ir_input_register(rc, &ir_mantis);
130 if (err) {
131 dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err);
132 input_free_device(rc);
133 return -ENODEV;
134 }
135
136 mantis->rc = rc;
137
138 return 0;
139}
140
141int mantis_exit(struct mantis_pci *mantis)
142{
143 struct input_dev *rc = mantis->rc;
144
145 ir_input_unregister(rc);
146
147 return 0;
148}
diff --git a/drivers/media/dvb/mantis/mantis_ioc.c b/drivers/media/dvb/mantis/mantis_ioc.c
new file mode 100644
index 000000000000..de148ded52d8
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ioc.c
@@ -0,0 +1,130 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/kernel.h>
22#include <linux/i2c.h>
23
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/interrupt.h>
27
28#include "dmxdev.h"
29#include "dvbdev.h"
30#include "dvb_demux.h"
31#include "dvb_frontend.h"
32#include "dvb_net.h"
33
34#include "mantis_common.h"
35#include "mantis_reg.h"
36#include "mantis_ioc.h"
37
38static int read_eeprom_bytes(struct mantis_pci *mantis, u8 reg, u8 *data, u8 length)
39{
40 struct i2c_adapter *adapter = &mantis->adapter;
41 int err;
42 u8 buf = reg;
43
44 struct i2c_msg msg[] = {
45 { .addr = 0x50, .flags = 0, .buf = &buf, .len = 1 },
46 { .addr = 0x50, .flags = I2C_M_RD, .buf = data, .len = length },
47 };
48
49 err = i2c_transfer(adapter, msg, 2);
50 if (err < 0) {
51 dprintk(MANTIS_ERROR, 1, "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
52 err, data[0], data[1]);
53
54 return err;
55 }
56
57 return 0;
58}
59int mantis_get_mac(struct mantis_pci *mantis)
60{
61 int err;
62 u8 mac_addr[6] = {0};
63
64 err = read_eeprom_bytes(mantis, 0x08, mac_addr, 6);
65 if (err < 0) {
66 dprintk(MANTIS_ERROR, 1, "ERROR: Mantis EEPROM read error <%d>", err);
67
68 return err;
69 }
70
71 dprintk(MANTIS_ERROR, 0,
72 " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
73 mac_addr[0],
74 mac_addr[1],
75 mac_addr[2],
76 mac_addr[3],
77 mac_addr[4],
78 mac_addr[5]);
79
80 return 0;
81}
82EXPORT_SYMBOL_GPL(mantis_get_mac);
83
84/* Turn the given bit on or off. */
85void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
86{
87 u32 cur;
88
89 dprintk(MANTIS_DEBUG, 1, "Set Bit <%d> to <%d>", bitpos, value);
90 cur = mmread(MANTIS_GPIF_ADDR);
91 if (value)
92 mantis->gpio_status = cur | (1 << bitpos);
93 else
94 mantis->gpio_status = cur & (~(1 << bitpos));
95
96 dprintk(MANTIS_DEBUG, 1, "GPIO Value <%02x>", mantis->gpio_status);
97 mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
98 mmwrite(0x00, MANTIS_GPIF_DOUT);
99}
100EXPORT_SYMBOL_GPL(gpio_set_bits);
101
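/*
 * Route the transport stream either straight to the host interface or
 * through the CAM by toggling the MANTIS_BYPASS bit in the control
 * register.
 */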
102int mantis_stream_control(struct mantis_pci *mantis, enum mantis_stream_control stream_ctl)
103{
104 u32 reg;
105
106 reg = mmread(MANTIS_CONTROL);
107 switch (stream_ctl) {
108 case STREAM_TO_HIF:
109 dprintk(MANTIS_DEBUG, 1, "Set stream to HIF");
110 reg &= 0xff - MANTIS_BYPASS;
111 mmwrite(reg, MANTIS_CONTROL);
112 reg |= MANTIS_BYPASS;
113 mmwrite(reg, MANTIS_CONTROL);
114 break;
115
116 case STREAM_TO_CAM:
117 dprintk(MANTIS_DEBUG, 1, "Set stream to CAM");
118 reg |= MANTIS_BYPASS;
119 mmwrite(reg, MANTIS_CONTROL);
120 reg &= 0xff - MANTIS_BYPASS;
121 mmwrite(reg, MANTIS_CONTROL);
122 break;
123 default:
124 dprintk(MANTIS_ERROR, 1, "Unknown MODE <%02x>", stream_ctl);
125 return -1;
126 }
127
128 return 0;
129}
130EXPORT_SYMBOL_GPL(mantis_stream_control);
diff --git a/drivers/media/dvb/mantis/mantis_ioc.h b/drivers/media/dvb/mantis/mantis_ioc.h
new file mode 100644
index 000000000000..188fe5a81614
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ioc.h
@@ -0,0 +1,51 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_IOC_H
22#define __MANTIS_IOC_H
23
24#define GPIF_A00 0x00
25#define GPIF_A01 0x01
26#define GPIF_A02 0x02
27#define GPIF_A03 0x03
28#define GPIF_A04 0x04
29#define GPIF_A05 0x05
30#define GPIF_A06 0x06
31#define GPIF_A07 0x07
32#define GPIF_A08 0x08
33#define GPIF_A09 0x09
34#define GPIF_A10 0x0a
35#define GPIF_A11 0x0b
36
37#define GPIF_A12 0x0c
38#define GPIF_A13 0x0d
39#define GPIF_A14 0x0e
40
41enum mantis_stream_control {
42 STREAM_TO_HIF = 0,
43 STREAM_TO_CAM
44};
45
46extern int mantis_get_mac(struct mantis_pci *mantis);
47extern void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value);
48
49extern int mantis_stream_control(struct mantis_pci *mantis, enum mantis_stream_control stream_ctl);
50
51#endif /* __MANTIS_IOC_H */
diff --git a/drivers/media/dvb/mantis/mantis_link.h b/drivers/media/dvb/mantis/mantis_link.h
new file mode 100644
index 000000000000..2a814774a001
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_link.h
@@ -0,0 +1,83 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_LINK_H
22#define __MANTIS_LINK_H
23
24#include <linux/mutex.h>
25#include <linux/workqueue.h>
26#include "dvb_ca_en50221.h"
27
28enum mantis_sbuf_status {
29 MANTIS_SBUF_DATA_AVAIL = 1,
30 MANTIS_SBUF_DATA_EMPTY = 2,
31 MANTIS_SBUF_DATA_OVFLW = 3
32};
33
34struct mantis_slot {
35 u32 timeout;
36 u32 slave_cfg;
37 u32 bar;
38};
39
40/* Physical layer */
41enum mantis_slot_state {
42 MODULE_INSERTED = 3,
43 MODULE_XTRACTED = 4
44};
45
46struct mantis_ca {
47 struct mantis_slot slot[4];
48
49 struct work_struct hif_evm_work;
50
51 u32 hif_event;
52 wait_queue_head_t hif_opdone_wq;
53 wait_queue_head_t hif_brrdyw_wq;
54 wait_queue_head_t hif_data_wq;
55 wait_queue_head_t hif_write_wq; /* HIF Write op */
56
57 enum mantis_sbuf_status sbuf_status;
58
59 enum mantis_slot_state slot_state;
60
61 void *ca_priv;
62
63 struct dvb_ca_en50221 en50221;
64 struct mutex ca_lock;
65};
66
67/* CA */
68extern void mantis_event_cam_plugin(struct mantis_ca *ca);
69extern void mantis_event_cam_unplug(struct mantis_ca *ca);
70extern int mantis_pcmcia_init(struct mantis_ca *ca);
71extern void mantis_pcmcia_exit(struct mantis_ca *ca);
72extern int mantis_evmgr_init(struct mantis_ca *ca);
73extern void mantis_evmgr_exit(struct mantis_ca *ca);
74
75/* HIF */
76extern int mantis_hif_init(struct mantis_ca *ca);
77extern void mantis_hif_exit(struct mantis_ca *ca);
78extern int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr);
79extern int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data);
80extern int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr);
81extern int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data);
82
83#endif /* __MANTIS_LINK_H */
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c
new file mode 100644
index 000000000000..6c7534af6b44
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pci.c
@@ -0,0 +1,177 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/kernel.h>
24#include <asm/io.h>
25#include <asm/pgtable.h>
26#include <asm/page.h>
27#include <linux/kmod.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/device.h>
31#include <linux/pci.h>
32
33#include <asm/irq.h>
34#include <linux/signal.h>
35#include <linux/sched.h>
36#include <linux/interrupt.h>
37
38#include "dmxdev.h"
39#include "dvbdev.h"
40#include "dvb_demux.h"
41#include "dvb_frontend.h"
42#include "dvb_net.h"
43
44#include <asm/irq.h>
45#include <linux/signal.h>
46#include <linux/sched.h>
47#include <linux/interrupt.h>
48
49#include "mantis_common.h"
50#include "mantis_reg.h"
51#include "mantis_pci.h"
52
53#define DRIVER_NAME "Mantis Core"
54
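/*
 * Generic PCI bring-up shared by all Mantis boards: enable the device,
 * set a 32-bit coherent DMA mask, map BAR0 and install the board
 * specific shared interrupt handler. Errors unwind through the
 * fail0..fail3 labels.
 */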
55int __devinit mantis_pci_init(struct mantis_pci *mantis)
56{
57 u8 revision, latency;
58 struct mantis_hwconfig *config = mantis->hwconfig;
59 struct pci_dev *pdev = mantis->pdev;
60 int err, ret = 0;
61
62 dprintk(MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
63 config->model_name,
64 config->dev_type,
65 mantis->pdev->bus->number,
66 PCI_SLOT(mantis->pdev->devfn),
67 PCI_FUNC(mantis->pdev->devfn));
68
69 err = pci_enable_device(pdev);
70 if (err != 0) {
71 ret = -ENODEV;
72 dprintk(MANTIS_ERROR, 1, "ERROR: PCI enable failed <%i>", err);
73 goto fail0;
74 }
75
76 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
77 if (err != 0) {
78 dprintk(MANTIS_ERROR, 1, "ERROR: Unable to obtain 32 bit DMA <%i>", err);
79 ret = -ENOMEM;
80 goto fail1;
81 }
82
83 pci_set_master(pdev);
84
85 if (!request_mem_region(pci_resource_start(pdev, 0),
86 pci_resource_len(pdev, 0),
87 DRIVER_NAME)) {
88
89 dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 Request failed !");
90 ret = -ENODEV;
91 goto fail1;
92 }
93
94 mantis->mmio = ioremap(pci_resource_start(pdev, 0),
95 pci_resource_len(pdev, 0));
96
97 if (!mantis->mmio) {
98 dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 remap failed !");
99 ret = -ENODEV;
100 goto fail2;
101 }
102
103 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency);
104 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
105 mantis->latency = latency;
106 mantis->revision = revision;
107
108 dprintk(MANTIS_ERROR, 0, " Mantis Rev %d [%04x:%04x], ",
109 mantis->revision,
110 mantis->pdev->subsystem_vendor,
111 mantis->pdev->subsystem_device);
112
113 dprintk(MANTIS_ERROR, 0,
114 "irq: %d, latency: %d\n memory: 0x%lx, mmio: 0x%p\n",
115 mantis->pdev->irq,
116 mantis->latency,
117 mantis->mantis_addr,
118 mantis->mmio);
119
120 err = request_irq(pdev->irq,
121 config->irq_handler,
122 IRQF_SHARED,
123 DRIVER_NAME,
124 mantis);
125
126 if (err != 0) {
127
128 dprintk(MANTIS_ERROR, 1, "ERROR: IRQ registration failed ! <%d>", err);
129 ret = -ENODEV;
130 goto fail3;
131 }
132
133 pci_set_drvdata(pdev, mantis);
134 return ret;
135
136 /* Error conditions */
137fail3:
138 dprintk(MANTIS_ERROR, 1, "ERROR: <%d> I/O unmap", ret);
139 if (mantis->mmio)
140 iounmap(mantis->mmio);
141
142fail2:
143 dprintk(MANTIS_ERROR, 1, "ERROR: <%d> releasing regions", ret);
144 release_mem_region(pci_resource_start(pdev, 0),
145 pci_resource_len(pdev, 0));
146
147fail1:
148 dprintk(MANTIS_ERROR, 1, "ERROR: <%d> disabling device", ret);
149 pci_disable_device(pdev);
150
151fail0:
152 dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret);
153 pci_set_drvdata(pdev, NULL);
154 return ret;
155}
156EXPORT_SYMBOL_GPL(mantis_pci_init);
157
158void mantis_pci_exit(struct mantis_pci *mantis)
159{
160 struct pci_dev *pdev = mantis->pdev;
161
162 dprintk(MANTIS_NOTICE, 1, " mem: 0x%p", mantis->mmio);
163 free_irq(pdev->irq, mantis);
164 if (mantis->mmio) {
165 iounmap(mantis->mmio);
166 release_mem_region(pci_resource_start(pdev, 0),
167 pci_resource_len(pdev, 0));
168 }
169
170 pci_disable_device(pdev);
171 pci_set_drvdata(pdev, NULL);
172}
173EXPORT_SYMBOL_GPL(mantis_pci_exit);
174
175MODULE_DESCRIPTION("Mantis PCI DTV bridge driver");
176MODULE_AUTHOR("Manu Abraham");
177MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/mantis_pci.h b/drivers/media/dvb/mantis/mantis_pci.h
new file mode 100644
index 000000000000..65f004519086
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pci.h
@@ -0,0 +1,27 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_PCI_H
22#define __MANTIS_PCI_H
23
24extern int mantis_pci_init(struct mantis_pci *mantis);
25extern void mantis_pci_exit(struct mantis_pci *mantis);
26
27#endif /* __MANTIS_PCI_H */
diff --git a/drivers/media/dvb/mantis/mantis_pcmcia.c b/drivers/media/dvb/mantis/mantis_pcmcia.c
new file mode 100644
index 000000000000..5cb545b913f6
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pcmcia.c
@@ -0,0 +1,120 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/kernel.h>
22
23#include <linux/signal.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26
27#include "dmxdev.h"
28#include "dvbdev.h"
29#include "dvb_demux.h"
30#include "dvb_frontend.h"
31#include "dvb_net.h"
32
33#include "mantis_common.h"
34#include "mantis_link.h" /* temporary due to physical layer stuff */
35#include "mantis_reg.h"
36
37/*
38 * If Slot state is already PLUG_IN event and we are called
39 * again, definitely it is jitter alone
40 */
41void mantis_event_cam_plugin(struct mantis_ca *ca)
42{
43 struct mantis_pci *mantis = ca->ca_priv;
44
45 u32 gpif_irqcfg;
46
47 if (ca->slot_state == MODULE_XTRACTED) {
48 dprintk(MANTIS_DEBUG, 1, "Event: CAM Plugged IN: Adapter(%d) Slot(0)", mantis->num);
49 udelay(50);
50 mmwrite(0xda000000, MANTIS_CARD_RESET);
51 gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
52 gpif_irqcfg |= MANTIS_MASK_PLUGOUT;
53 gpif_irqcfg &= ~MANTIS_MASK_PLUGIN;
54 mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
55 udelay(500);
56 ca->slot_state = MODULE_INSERTED;
57 }
58 udelay(100);
59}
60
61/*
62 * If Slot state is already UN_PLUG event and we are called
63 * again, definitely it is jitter alone
64 */
65void mantis_event_cam_unplug(struct mantis_ca *ca)
66{
67 struct mantis_pci *mantis = ca->ca_priv;
68
69 u32 gpif_irqcfg;
70
71 if (ca->slot_state == MODULE_INSERTED) {
72 dprintk(MANTIS_DEBUG, 1, "Event: CAM Unplugged: Adapter(%d) Slot(0)", mantis->num);
73 udelay(50);
74 mmwrite(0x00da0000, MANTIS_CARD_RESET);
75 gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
76 gpif_irqcfg |= MANTIS_MASK_PLUGIN;
77 gpif_irqcfg &= ~MANTIS_MASK_PLUGOUT;
78 mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
79 udelay(500);
80 ca->slot_state = MODULE_XTRACTED;
81 }
82 udelay(100);
83}
84
85int mantis_pcmcia_init(struct mantis_ca *ca)
86{
87 struct mantis_pci *mantis = ca->ca_priv;
88
89 u32 gpif_stat, card_stat;
90
91 mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_IRQ0, MANTIS_INT_MASK);
92 gpif_stat = mmread(MANTIS_GPIF_STATUS);
93 card_stat = mmread(MANTIS_GPIF_IRQCFG);
94
95 if (gpif_stat & MANTIS_GPIF_DETSTAT) {
96 dprintk(MANTIS_DEBUG, 1, "CAM found on Adapter(%d) Slot(0)", mantis->num);
97 mmwrite(card_stat | MANTIS_MASK_PLUGOUT, MANTIS_GPIF_IRQCFG);
98 ca->slot_state = MODULE_INSERTED;
99 dvb_ca_en50221_camchange_irq(&ca->en50221,
100 0,
101 DVB_CA_EN50221_CAMCHANGE_INSERTED);
102 } else {
103 dprintk(MANTIS_DEBUG, 1, "Empty Slot on Adapter(%d) Slot(0)", mantis->num);
104 mmwrite(card_stat | MANTIS_MASK_PLUGIN, MANTIS_GPIF_IRQCFG);
105 ca->slot_state = MODULE_XTRACTED;
106 dvb_ca_en50221_camchange_irq(&ca->en50221,
107 0,
108 DVB_CA_EN50221_CAMCHANGE_REMOVED);
109 }
110
111 return 0;
112}
113
114void mantis_pcmcia_exit(struct mantis_ca *ca)
115{
116 struct mantis_pci *mantis = ca->ca_priv;
117
118 mmwrite(mmread(MANTIS_GPIF_STATUS) & ~(MANTIS_CARD_PLUGOUT | MANTIS_CARD_PLUGIN), MANTIS_GPIF_STATUS);
119 mmwrite(mmread(MANTIS_INT_MASK) & ~MANTIS_INT_IRQ0, MANTIS_INT_MASK);
120}
diff --git a/drivers/media/dvb/mantis/mantis_reg.h b/drivers/media/dvb/mantis/mantis_reg.h
new file mode 100644
index 000000000000..7761f9dc7fe0
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_reg.h
@@ -0,0 +1,197 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_REG_H
22#define __MANTIS_REG_H
23
24/* Interrupts */
25#define MANTIS_INT_STAT 0x00
26#define MANTIS_INT_MASK 0x04
27
28#define MANTIS_INT_RISCSTAT (0x0f << 28)
29#define MANTIS_INT_RISCEN (0x01 << 27)
30#define MANTIS_INT_I2CRACK (0x01 << 26)
31
32/* #define MANTIS_INT_GPIF (0xff << 12) */
33
34#define MANTIS_INT_PCMCIA7 (0x01 << 19)
35#define MANTIS_INT_PCMCIA6 (0x01 << 18)
36#define MANTIS_INT_PCMCIA5 (0x01 << 17)
37#define MANTIS_INT_PCMCIA4 (0x01 << 16)
38#define MANTIS_INT_PCMCIA3 (0x01 << 15)
39#define MANTIS_INT_PCMCIA2 (0x01 << 14)
40#define MANTIS_INT_PCMCIA1 (0x01 << 13)
41#define MANTIS_INT_PCMCIA0 (0x01 << 12)
42#define MANTIS_INT_IRQ1 (0x01 << 11)
43#define MANTIS_INT_IRQ0 (0x01 << 10)
44#define MANTIS_INT_OCERR (0x01 << 8)
45#define MANTIS_INT_PABORT (0x01 << 7)
46#define MANTIS_INT_RIPERR (0x01 << 6)
47#define MANTIS_INT_PPERR (0x01 << 5)
48#define MANTIS_INT_FTRGT (0x01 << 3)
49#define MANTIS_INT_RISCI (0x01 << 1)
50#define MANTIS_INT_I2CDONE (0x01 << 0)
51
52/* DMA */
53#define MANTIS_DMA_CTL 0x08
54#define MANTIS_GPIF_RD (0xff << 24)
55#define MANTIS_GPIF_WR (0xff << 16)
56#define MANTIS_CPU_DO (0x01 << 10)
57#define MANTIS_DRV_DO (0x01 << 9)
58#define MANTIS_I2C_RD (0x01 << 7)
59#define MANTIS_I2C_WR (0x01 << 6)
60#define MANTIS_DCAP_MODE (0x01 << 5)
61#define MANTIS_FIFO_TP_4 (0x00 << 3)
62#define MANTIS_FIFO_TP_8 (0x01 << 3)
63#define MANTIS_FIFO_TP_16 (0x02 << 3)
64#define MANTIS_FIFO_EN (0x01 << 2)
65#define MANTIS_DCAP_EN (0x01 << 1)
66#define MANTIS_RISC_EN (0x01 << 0)
67
68/* DEBUG */
69#define MANTIS_DEBUGREG 0x0c
70#define MANTIS_DATINV (0x0e << 7)
71#define MANTIS_TOP_DEBUGSEL (0x07 << 4)
72#define MANTIS_PCMCIA_DEBUGSEL (0x0f << 0)
73
74#define MANTIS_RISC_START 0x10
75#define MANTIS_RISC_PC 0x14
76
77/* I2C */
78#define MANTIS_I2CDATA_CTL 0x18
79#define MANTIS_I2C_RATE_1 (0x00 << 6)
80#define MANTIS_I2C_RATE_2 (0x01 << 6)
81#define MANTIS_I2C_RATE_3 (0x02 << 6)
82#define MANTIS_I2C_RATE_4 (0x03 << 6)
83#define MANTIS_I2C_STOP (0x01 << 5)
84#define MANTIS_I2C_PGMODE (0x01 << 3)
85
86/* DATA */
87#define MANTIS_CMD_DATA_R1 0x20
88#define MANTIS_CMD_DATA_3 (0xff << 24)
89#define MANTIS_CMD_DATA_2 (0xff << 16)
90#define MANTIS_CMD_DATA_1 (0xff << 8)
91#define MANTIS_CMD_DATA_0 (0xff << 0)
92
93#define MANTIS_CMD_DATA_R2 0x24
94#define MANTIS_CMD_DATA_7 (0xff << 24)
95#define MANTIS_CMD_DATA_6 (0xff << 16)
96#define MANTIS_CMD_DATA_5 (0xff << 8)
97#define MANTIS_CMD_DATA_4 (0xff << 0)
98
99#define MANTIS_CONTROL 0x28
100#define MANTIS_DET (0x01 << 7)
101#define MANTIS_DAT_CF_EN (0x01 << 6)
102#define MANTIS_ACS (0x03 << 4)
103#define MANTIS_VCCEN (0x01 << 3)
104#define MANTIS_BYPASS (0x01 << 2)
105#define MANTIS_MRST (0x01 << 1)
106#define MANTIS_CRST_INT (0x01 << 0)
107
108#define MANTIS_GPIF_CFGSLA 0x84
109#define MANTIS_GPIF_WAITSMPL (0x07 << 28)
110#define MANTIS_GPIF_BYTEADDRSUB (0x01 << 25)
111#define MANTIS_GPIF_WAITPOL (0x01 << 24)
112#define MANTIS_GPIF_NCDELAY (0x07 << 20)
113#define MANTIS_GPIF_RW2CSDELAY (0x07 << 16)
114#define MANTIS_GPIF_SLFTIMEDMODE (0x01 << 15)
115#define MANTIS_GPIF_SLFTIMEDDELY (0x7f << 8)
116#define MANTIS_GPIF_DEVTYPE (0x07 << 4)
117#define MANTIS_GPIF_BIGENDIAN (0x01 << 3)
118#define MANTIS_GPIF_FETCHCMD (0x03 << 1)
119#define MANTIS_GPIF_HWORDDEV (0x01 << 0)
120
121#define MANTIS_GPIF_WSTOPER 0x90
122#define MANTIS_GPIF_WSTOPERWREN3 (0x01 << 31)
123#define MANTIS_GPIF_PARBOOTN (0x01 << 29)
124#define MANTIS_GPIF_WSTOPERSLID3 (0x1f << 24)
125#define MANTIS_GPIF_WSTOPERWREN2 (0x01 << 23)
126#define MANTIS_GPIF_WSTOPERSLID2 (0x1f << 16)
127#define MANTIS_GPIF_WSTOPERWREN1 (0x01 << 15)
128#define MANTIS_GPIF_WSTOPERSLID1 (0x1f << 8)
129#define MANTIS_GPIF_WSTOPERWREN0 (0x01 << 7)
130#define MANTIS_GPIF_WSTOPERSLID0 (0x1f << 0)
131
132#define MANTIS_GPIF_CS2RW 0x94
133#define MANTIS_GPIF_CS2RWWREN3 (0x01 << 31)
134#define MANTIS_GPIF_CS2RWDELY3 (0x3f << 24)
135#define MANTIS_GPIF_CS2RWWREN2 (0x01 << 23)
136#define MANTIS_GPIF_CS2RWDELY2 (0x3f << 16)
137#define MANTIS_GPIF_CS2RWWREN1 (0x01 << 15)
138#define MANTIS_GPIF_CS2RWDELY1 (0x3f << 8)
139#define MANTIS_GPIF_CS2RWWREN0 (0x01 << 7)
140#define MANTIS_GPIF_CS2RWDELY0 (0x3f << 0)
141
142#define MANTIS_GPIF_IRQCFG 0x98
143#define MANTIS_GPIF_IRQPOL (0x01 << 8)
144#define MANTIS_MASK_WRACK (0x01 << 7)
145#define MANTIS_MASK_BRRDY (0x01 << 6)
146#define MANTIS_MASK_OVFLW (0x01 << 5)
147#define MANTIS_MASK_OTHERR (0x01 << 4)
148#define MANTIS_MASK_WSTO (0x01 << 3)
149#define MANTIS_MASK_EXTIRQ (0x01 << 2)
150#define MANTIS_MASK_PLUGIN (0x01 << 1)
151#define MANTIS_MASK_PLUGOUT (0x01 << 0)
152
153#define MANTIS_GPIF_STATUS 0x9c
154#define MANTIS_SBUF_KILLOP (0x01 << 15)
155#define MANTIS_SBUF_OPDONE (0x01 << 14)
156#define MANTIS_SBUF_EMPTY (0x01 << 13)
157#define MANTIS_GPIF_DETSTAT (0x01 << 9)
158#define MANTIS_GPIF_INTSTAT (0x01 << 8)
159#define MANTIS_GPIF_WRACK (0x01 << 7)
160#define MANTIS_GPIF_BRRDY (0x01 << 6)
161#define MANTIS_SBUF_OVFLW (0x01 << 5)
162#define MANTIS_GPIF_OTHERR (0x01 << 4)
163#define MANTIS_SBUF_WSTO (0x01 << 3)
164#define MANTIS_GPIF_EXTIRQ (0x01 << 2)
165#define MANTIS_CARD_PLUGIN (0x01 << 1)
166#define MANTIS_CARD_PLUGOUT (0x01 << 0)
167
168#define MANTIS_GPIF_BRADDR 0xa0
169#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
170#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
171#define MANTIS_GPIF_BR_ADDR (0xfffffff << 0)
172
173#define MANTIS_GPIF_BRBYTES 0xa4
174#define MANTIS_GPIF_BRCNT (0xfff << 0)
175
176#define MANTIS_PCMCIA_RESET 0xa8
177#define MANTIS_PCMCIA_RSTVAL (0xff << 0)
178
179#define MANTIS_CARD_RESET 0xac
180
181#define MANTIS_GPIF_ADDR 0xb0
182#define MANTIS_GPIF_HIFRDWRN (0x01 << 31)
183#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
184#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
185#define MANTIS_GPIF_HIFADDR (0xfffffff << 0)
186
187#define MANTIS_GPIF_DOUT 0xb4
188#define MANTIS_GPIF_HIFDOUT (0xfffffff << 0)
189
190#define MANTIS_GPIF_DIN 0xb8
191#define MANTIS_GPIF_HIFDIN (0xfffffff << 0)
192
193#define MANTIS_GPIF_SPARE 0xbc
194#define MANTIS_GPIF_LOGICRD (0xffff << 16)
195#define MANTIS_GPIF_LOGICRW (0xffff << 0)
196
197#endif /* __MANTIS_REG_H */
diff --git a/drivers/media/dvb/mantis/mantis_uart.c b/drivers/media/dvb/mantis/mantis_uart.c
new file mode 100644
index 000000000000..7d2f2398fa8b
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_uart.c
@@ -0,0 +1,186 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/kernel.h>
22#include <linux/spinlock.h>
23
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/interrupt.h>
27
28#include "dmxdev.h"
29#include "dvbdev.h"
30#include "dvb_demux.h"
31#include "dvb_frontend.h"
32#include "dvb_net.h"
33
34#include "mantis_common.h"
35#include "mantis_reg.h"
36#include "mantis_uart.h"
37
38struct mantis_uart_params {
39 enum mantis_baud baud_rate;
40 enum mantis_parity parity;
41};
42
43static struct {
44 char string[7];
45} rates[5] = {
46 { "9600" },
47 { "19200" },
48 { "38400" },
49 { "57600" },
50 { "115200" }
51};
52
53static struct {
54 char string[5];
55} parity[3] = {
56 { "NONE" },
57 { "ODD" },
58 { "EVEN" }
59};
60
61#define UART_MAX_BUF 16
62
63int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
64{
65 struct mantis_hwconfig *config = mantis->hwconfig;
66 u32 stat = 0, i;
67
68 /* get data */
69 for (i = 0; i < (config->bytes + 1); i++) {
70
71 stat = mmread(MANTIS_UART_STAT);
72
73 if (stat & MANTIS_UART_RXFIFO_FULL) {
74 dprintk(MANTIS_ERROR, 1, "RX Fifo FULL");
75 }
76 data[i] = mmread(MANTIS_UART_RXD) & 0x3f;
77
78 dprintk(MANTIS_DEBUG, 1, "Reading ... <%02x>", data[i] & 0x3f);
79
80 if (data[i] & (1 << 7)) {
81 dprintk(MANTIS_ERROR, 1, "UART framing error");
82 return -EINVAL;
83 }
84 if (data[i] & (1 << 6)) {
85 dprintk(MANTIS_ERROR, 1, "UART parity error");
86 return -EINVAL;
87 }
88 }
89
90 return 0;
91}
92
93static void mantis_uart_work(struct work_struct *work)
94{
95 struct mantis_pci *mantis = container_of(work, struct mantis_pci, uart_work);
96 struct mantis_hwconfig *config = mantis->hwconfig;
97 u8 buf[16];
98 int i;
99
100 mantis_uart_read(mantis, buf);
101
102 for (i = 0; i < (config->bytes + 1); i++)
103 dprintk(MANTIS_INFO, 1, "UART BUF:%d <%02x> ", i, buf[i]);
104
105 dprintk(MANTIS_DEBUG, 0, "\n");
106}
107
108static int mantis_uart_setup(struct mantis_pci *mantis,
109 struct mantis_uart_params *params)
110{
111 u32 reg;
112
113 mmwrite((mmread(MANTIS_UART_CTL) | (params->parity & 0x3)), MANTIS_UART_CTL);
114
115 reg = mmread(MANTIS_UART_BAUD);
116
117 switch (params->baud_rate) {
118 case MANTIS_BAUD_9600:
119 reg |= 0xd8;
120 break;
121 case MANTIS_BAUD_19200:
122 reg |= 0x6c;
123 break;
124 case MANTIS_BAUD_38400:
125 reg |= 0x36;
126 break;
127 case MANTIS_BAUD_57600:
128 reg |= 0x23;
129 break;
130 case MANTIS_BAUD_115200:
131 reg |= 0x11;
132 break;
133 default:
134 return -EINVAL;
135 }
136
137 mmwrite(reg, MANTIS_UART_BAUD);
138
139 return 0;
140}
141
142int mantis_uart_init(struct mantis_pci *mantis)
143{
144 struct mantis_hwconfig *config = mantis->hwconfig;
145 struct mantis_uart_params params;
146
147 /* default parity: */
148 params.baud_rate = config->baud_rate;
149 params.parity = config->parity;
150 dprintk(MANTIS_INFO, 1, "Initializing UART @ %sbps parity:%s",
151 rates[params.baud_rate].string,
152 parity[params.parity].string);
153
154 init_waitqueue_head(&mantis->uart_wq);
155 spin_lock_init(&mantis->uart_lock);
156
157 INIT_WORK(&mantis->uart_work, mantis_uart_work);
158
159 /* disable interrupt */
160 mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
161
162 mantis_uart_setup(mantis, &params);
163
164 /* default 1 byte */
165 mmwrite((mmread(MANTIS_UART_BAUD) | (config->bytes << 8)), MANTIS_UART_BAUD);
166
167 /* flush buffer */
168 mmwrite((mmread(MANTIS_UART_CTL) | MANTIS_UART_RXFLUSH), MANTIS_UART_CTL);
169
170 /* enable interrupt */
171 mmwrite(mmread(MANTIS_INT_MASK) | 0x800, MANTIS_INT_MASK);
172 mmwrite(mmread(MANTIS_UART_CTL) | MANTIS_UART_RXINT, MANTIS_UART_CTL);
173
174 schedule_work(&mantis->uart_work);
175	dprintk(MANTIS_DEBUG, 1, "UART successfully initialized");
176
177 return 0;
178}
179EXPORT_SYMBOL_GPL(mantis_uart_init);
180
181void mantis_uart_exit(struct mantis_pci *mantis)
182{
183 /* disable interrupt */
184 mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
185}
186EXPORT_SYMBOL_GPL(mantis_uart_exit);
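
The receive path above is interrupt driven: mantis_uart_init() unmasks bit 0x800 in MANTIS_INT_MASK and enables MANTIS_UART_RXINT, leaving it to the bridge's PCI interrupt handler (added elsewhere in this series) to schedule uart_work, which then drains config->bytes + 1 characters through mantis_uart_read(). A minimal sketch of that hookup might look as follows; the interrupt-status register name and its write-to-clear behaviour are assumptions, only the 0x800 UART bit and the uart_work member come from the code above.

/* Illustrative sketch only, not part of this patch. */
static irqreturn_t mantis_irq_sketch(int irq, void *dev_id)
{
	struct mantis_pci *mantis = dev_id;
	u32 stat = mmread(MANTIS_INT_STAT);	/* register name assumed */

	if (!stat)
		return IRQ_NONE;
	if (stat & 0x800)			/* UART RX, same bit as the MANTIS_INT_MASK write above */
		schedule_work(&mantis->uart_work);
	mmwrite(stat, MANTIS_INT_STAT);		/* ack, assuming write-to-clear */

	return IRQ_HANDLED;
}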
diff --git a/drivers/media/dvb/mantis/mantis_uart.h b/drivers/media/dvb/mantis/mantis_uart.h
new file mode 100644
index 000000000000..ffb62a0a5a13
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_uart.h
@@ -0,0 +1,58 @@
1/*
2 Mantis PCI bridge driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_UART_H
22#define __MANTIS_UART_H
23
24#define MANTIS_UART_CTL 0xe0
25#define MANTIS_UART_RXINT (1 << 4)
26#define MANTIS_UART_RXFLUSH (1 << 2)
27
28#define MANTIS_UART_RXD 0xe8
29#define MANTIS_UART_BAUD 0xec
30
31#define MANTIS_UART_STAT 0xf0
32#define MANTIS_UART_RXFIFO_DATA (1 << 7)
33#define MANTIS_UART_RXFIFO_EMPTY (1 << 6)
34#define MANTIS_UART_RXFIFO_FULL (1 << 3)
35#define MANTIS_UART_FRAME_ERR (1 << 2)
36#define MANTIS_UART_PARITY_ERR (1 << 1)
37#define MANTIS_UART_RXTHRESH_INT (1 << 0)
38
39enum mantis_baud {
40 MANTIS_BAUD_9600 = 0,
41 MANTIS_BAUD_19200,
42 MANTIS_BAUD_38400,
43 MANTIS_BAUD_57600,
44 MANTIS_BAUD_115200
45};
46
47enum mantis_parity {
48 MANTIS_PARITY_NONE = 0,
49 MANTIS_PARITY_EVEN,
50 MANTIS_PARITY_ODD,
51};
52
53struct mantis_pci;
54
55extern int mantis_uart_init(struct mantis_pci *mantis);
56extern void mantis_uart_exit(struct mantis_pci *mantis);
57
58#endif /* __MANTIS_UART_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1033.c b/drivers/media/dvb/mantis/mantis_vp1033.c
new file mode 100644
index 000000000000..4a723bda0031
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1033.c
@@ -0,0 +1,212 @@
1/*
2 Mantis VP-1033 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "stv0299.h"
32#include "mantis_common.h"
33#include "mantis_ioc.h"
34#include "mantis_dvb.h"
35#include "mantis_vp1033.h"
36#include "mantis_reg.h"
37
38u8 lgtdqcs001f_inittab[] = {
39 0x01, 0x15,
40 0x02, 0x00,
41 0x03, 0x00,
42 0x04, 0x2a,
43 0x05, 0x85,
44 0x06, 0x02,
45 0x07, 0x00,
46 0x08, 0x00,
47 0x0c, 0x01,
48 0x0d, 0x81,
49 0x0e, 0x44,
50 0x0f, 0x94,
51 0x10, 0x3c,
52 0x11, 0x84,
53 0x12, 0xb9,
54 0x13, 0xb5,
55 0x14, 0x4f,
56 0x15, 0xc9,
57 0x16, 0x80,
58 0x17, 0x36,
59 0x18, 0xfb,
60 0x19, 0xcf,
61 0x1a, 0xbc,
62 0x1c, 0x2b,
63 0x1d, 0x27,
64 0x1e, 0x00,
65 0x1f, 0x0b,
66 0x20, 0xa1,
67 0x21, 0x60,
68 0x22, 0x00,
69 0x23, 0x00,
70 0x28, 0x00,
71 0x29, 0x28,
72 0x2a, 0x14,
73 0x2b, 0x0f,
74 0x2c, 0x09,
75 0x2d, 0x05,
76 0x31, 0x1f,
77 0x32, 0x19,
78 0x33, 0xfc,
79 0x34, 0x13,
80 0xff, 0xff,
81};
82
83#define MANTIS_MODEL_NAME "VP-1033"
84#define MANTIS_DEV_TYPE "DVB-S/DSS"
85
86int lgtdqcs001f_tuner_set(struct dvb_frontend *fe,
87 struct dvb_frontend_parameters *params)
88{
89 struct mantis_pci *mantis = fe->dvb->priv;
90 struct i2c_adapter *adapter = &mantis->adapter;
91
92 u8 buf[4];
93 u32 div;
94
95
96 struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf)};
97
98 div = params->frequency / 250;
99
100 buf[0] = (div >> 8) & 0x7f;
101 buf[1] = div & 0xff;
102 buf[2] = 0x83;
103 buf[3] = 0xc0;
104
105 if (params->frequency < 1531000)
106 buf[3] |= 0x04;
107 else
108 buf[3] &= ~0x04;
109 if (i2c_transfer(adapter, &msg, 1) < 0) {
110 dprintk(MANTIS_ERROR, 1, "Write: I2C Transfer failed");
111 return -EIO;
112 }
113 msleep_interruptible(100);
114
115 return 0;
116}
117
118int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
119 u32 srate, u32 ratio)
120{
121 u8 aclk = 0;
122 u8 bclk = 0;
123
124 if (srate < 1500000) {
125 aclk = 0xb7;
126 bclk = 0x47;
127 } else if (srate < 3000000) {
128 aclk = 0xb7;
129 bclk = 0x4b;
130 } else if (srate < 7000000) {
131 aclk = 0xb7;
132 bclk = 0x4f;
133 } else if (srate < 14000000) {
134 aclk = 0xb7;
135 bclk = 0x53;
136 } else if (srate < 30000000) {
137 aclk = 0xb6;
138 bclk = 0x53;
139 } else if (srate < 45000000) {
140 aclk = 0xb4;
141 bclk = 0x51;
142 }
143 stv0299_writereg(fe, 0x13, aclk);
144 stv0299_writereg(fe, 0x14, bclk);
145
146 stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
147 stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
148 stv0299_writereg(fe, 0x21, ratio & 0xf0);
149
150 return 0;
151}
152
153struct stv0299_config lgtdqcs001f_config = {
154 .demod_address = 0x68,
155 .inittab = lgtdqcs001f_inittab,
156 .mclk = 88000000UL,
157 .invert = 0,
158 .skip_reinit = 0,
159 .volt13_op0_op1 = STV0299_VOLT13_OP0,
160 .min_delay_ms = 100,
161 .set_symbol_rate = lgtdqcs001f_set_symbol_rate,
162};
163
164static int vp1033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
165{
166 struct i2c_adapter *adapter = &mantis->adapter;
167
168 int err = 0;
169
170 err = mantis_frontend_power(mantis, POWER_ON);
171 if (err == 0) {
172 mantis_frontend_soft_reset(mantis);
173 msleep(250);
174
175 dprintk(MANTIS_ERROR, 1, "Probing for STV0299 (DVB-S)");
176 fe = stv0299_attach(&lgtdqcs001f_config, adapter);
177
178 if (fe) {
179 fe->ops.tuner_ops.set_params = lgtdqcs001f_tuner_set;
180 dprintk(MANTIS_ERROR, 1, "found STV0299 DVB-S frontend @ 0x%02x",
181 lgtdqcs001f_config.demod_address);
182
183 dprintk(MANTIS_ERROR, 1, "Mantis DVB-S STV0299 frontend attach success");
184 } else {
185 return -1;
186 }
187 } else {
188 dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
189 adapter->name,
190 err);
191
192 return -EIO;
193 }
194 mantis->fe = fe;
195 dprintk(MANTIS_ERROR, 1, "Done!");
196
197 return 0;
198}
199
200struct mantis_hwconfig vp1033_config = {
201 .model_name = MANTIS_MODEL_NAME,
202 .dev_type = MANTIS_DEV_TYPE,
203 .ts_size = MANTIS_TS_204,
204
205 .baud_rate = MANTIS_BAUD_9600,
206 .parity = MANTIS_PARITY_NONE,
207 .bytes = 0,
208
209 .frontend_init = vp1033_frontend_init,
210 .power = GPIF_A12,
211 .reset = GPIF_A13,
212};
diff --git a/drivers/media/dvb/mantis/mantis_vp1033.h b/drivers/media/dvb/mantis/mantis_vp1033.h
new file mode 100644
index 000000000000..7daaa1bf127d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1033.h
@@ -0,0 +1,30 @@
1/*
2 Mantis VP-1033 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP1033_H
22#define __MANTIS_VP1033_H
23
24#include "mantis_common.h"
25
26#define MANTIS_VP_1033_DVB_S 0x0016
27
28extern struct mantis_hwconfig vp1033_config;
29
30#endif /* __MANTIS_VP1033_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1034.c b/drivers/media/dvb/mantis/mantis_vp1034.c
new file mode 100644
index 000000000000..8e6ae558ee57
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1034.c
@@ -0,0 +1,119 @@
1/*
2 Mantis VP-1034 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "mb86a16.h"
32#include "mantis_common.h"
33#include "mantis_ioc.h"
34#include "mantis_dvb.h"
35#include "mantis_vp1034.h"
36#include "mantis_reg.h"
37
38struct mb86a16_config vp1034_mb86a16_config = {
39 .demod_address = 0x08,
40 .set_voltage = vp1034_set_voltage,
41};
42
43#define MANTIS_MODEL_NAME "VP-1034"
44#define MANTIS_DEV_TYPE "DVB-S/DSS"
45
46int vp1034_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
47{
48 struct mantis_pci *mantis = fe->dvb->priv;
49
50 switch (voltage) {
51 case SEC_VOLTAGE_13:
52 dprintk(MANTIS_ERROR, 1, "Polarization=[13V]");
53 gpio_set_bits(mantis, 13, 1);
54 gpio_set_bits(mantis, 14, 0);
55 break;
56 case SEC_VOLTAGE_18:
57 dprintk(MANTIS_ERROR, 1, "Polarization=[18V]");
58 gpio_set_bits(mantis, 13, 1);
59 gpio_set_bits(mantis, 14, 1);
60 break;
61 case SEC_VOLTAGE_OFF:
62 dprintk(MANTIS_ERROR, 1, "Frontend (dummy) POWERDOWN");
63 break;
64 default:
65 dprintk(MANTIS_ERROR, 1, "Invalid = (%d)", (u32) voltage);
66 return -EINVAL;
67 }
68 mmwrite(0x00, MANTIS_GPIF_DOUT);
69
70 return 0;
71}
72
73static int vp1034_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
74{
75 struct i2c_adapter *adapter = &mantis->adapter;
76
77 int err = 0;
78
79 err = mantis_frontend_power(mantis, POWER_ON);
80 if (err == 0) {
81 mantis_frontend_soft_reset(mantis);
82 msleep(250);
83
84 dprintk(MANTIS_ERROR, 1, "Probing for MB86A16 (DVB-S/DSS)");
85 fe = mb86a16_attach(&vp1034_mb86a16_config, adapter);
86 if (fe) {
87 dprintk(MANTIS_ERROR, 1,
88 "found MB86A16 DVB-S/DSS frontend @0x%02x",
89 vp1034_mb86a16_config.demod_address);
90
91 } else {
92 return -1;
93 }
94 } else {
95 dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
96 adapter->name,
97 err);
98
99 return -EIO;
100 }
101 mantis->fe = fe;
102 dprintk(MANTIS_ERROR, 1, "Done!");
103
104 return 0;
105}
106
107struct mantis_hwconfig vp1034_config = {
108 .model_name = MANTIS_MODEL_NAME,
109 .dev_type = MANTIS_DEV_TYPE,
110 .ts_size = MANTIS_TS_204,
111
112 .baud_rate = MANTIS_BAUD_9600,
113 .parity = MANTIS_PARITY_NONE,
114 .bytes = 0,
115
116 .frontend_init = vp1034_frontend_init,
117 .power = GPIF_A12,
118 .reset = GPIF_A13,
119};
diff --git a/drivers/media/dvb/mantis/mantis_vp1034.h b/drivers/media/dvb/mantis/mantis_vp1034.h
new file mode 100644
index 000000000000..323f38ef8e3d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1034.h
@@ -0,0 +1,33 @@
1/*
2 Mantis VP-1034 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP1034_H
22#define __MANTIS_VP1034_H
23
24#include "dvb_frontend.h"
25#include "mantis_common.h"
26
27
28#define MANTIS_VP_1034_DVB_S 0x0014
29
30extern struct mantis_hwconfig vp1034_config;
31extern int vp1034_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
32
33#endif /* __MANTIS_VP1034_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.c b/drivers/media/dvb/mantis/mantis_vp1041.c
new file mode 100644
index 000000000000..515346dd31d0
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1041.c
@@ -0,0 +1,358 @@
1/*
2 Mantis VP-1041 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "mantis_common.h"
32#include "mantis_ioc.h"
33#include "mantis_dvb.h"
34#include "mantis_vp1041.h"
35#include "stb0899_reg.h"
36#include "stb0899_drv.h"
37#include "stb0899_cfg.h"
38#include "stb6100_cfg.h"
39#include "stb6100.h"
40#include "lnbp21.h"
41
42#define MANTIS_MODEL_NAME "VP-1041"
43#define MANTIS_DEV_TYPE "DSS/DVB-S/DVB-S2"
44
45static const struct stb0899_s1_reg vp1041_stb0899_s1_init_1[] = {
46
47 /* 0x0000000b, *//* SYSREG */
48 { STB0899_DEV_ID , 0x30 },
49 { STB0899_DISCNTRL1 , 0x32 },
50 { STB0899_DISCNTRL2 , 0x80 },
51 { STB0899_DISRX_ST0 , 0x04 },
52 { STB0899_DISRX_ST1 , 0x00 },
53 { STB0899_DISPARITY , 0x00 },
54 { STB0899_DISFIFO , 0x00 },
55 { STB0899_DISSTATUS , 0x20 },
56 { STB0899_DISF22 , 0x99 },
57 { STB0899_DISF22RX , 0xa8 },
58 /* SYSREG ? */
59 { STB0899_ACRPRESC , 0x11 },
60 { STB0899_ACRDIV1 , 0x0a },
61 { STB0899_ACRDIV2 , 0x05 },
62 { STB0899_DACR1 , 0x00 },
63 { STB0899_DACR2 , 0x00 },
64 { STB0899_OUTCFG , 0x00 },
65 { STB0899_MODECFG , 0x00 },
66 { STB0899_IRQSTATUS_3 , 0xfe },
67 { STB0899_IRQSTATUS_2 , 0x03 },
68 { STB0899_IRQSTATUS_1 , 0x7c },
69 { STB0899_IRQSTATUS_0 , 0xf4 },
70 { STB0899_IRQMSK_3 , 0xf3 },
71 { STB0899_IRQMSK_2 , 0xfc },
72 { STB0899_IRQMSK_1 , 0xff },
73 { STB0899_IRQMSK_0 , 0xff },
74 { STB0899_IRQCFG , 0x00 },
75 { STB0899_I2CCFG , 0x88 },
76 { STB0899_I2CRPT , 0x58 },
77 { STB0899_IOPVALUE5 , 0x00 },
78 { STB0899_IOPVALUE4 , 0x33 },
79 { STB0899_IOPVALUE3 , 0x6d },
80 { STB0899_IOPVALUE2 , 0x90 },
81 { STB0899_IOPVALUE1 , 0x60 },
82 { STB0899_IOPVALUE0 , 0x00 },
83 { STB0899_GPIO00CFG , 0x82 },
84 { STB0899_GPIO01CFG , 0x82 },
85 { STB0899_GPIO02CFG , 0x82 },
86 { STB0899_GPIO03CFG , 0x82 },
87 { STB0899_GPIO04CFG , 0x82 },
88 { STB0899_GPIO05CFG , 0x82 },
89 { STB0899_GPIO06CFG , 0x82 },
90 { STB0899_GPIO07CFG , 0x82 },
91 { STB0899_GPIO08CFG , 0x82 },
92 { STB0899_GPIO09CFG , 0x82 },
93 { STB0899_GPIO10CFG , 0x82 },
94 { STB0899_GPIO11CFG , 0x82 },
95 { STB0899_GPIO12CFG , 0x82 },
96 { STB0899_GPIO13CFG , 0x82 },
97 { STB0899_GPIO14CFG , 0x82 },
98 { STB0899_GPIO15CFG , 0x82 },
99 { STB0899_GPIO16CFG , 0x82 },
100 { STB0899_GPIO17CFG , 0x82 },
101 { STB0899_GPIO18CFG , 0x82 },
102 { STB0899_GPIO19CFG , 0x82 },
103 { STB0899_GPIO20CFG , 0x82 },
104 { STB0899_SDATCFG , 0xb8 },
105 { STB0899_SCLTCFG , 0xba },
106 { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
107 { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
108 { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
109 { STB0899_DIRCLKCFG , 0x82 },
110 { STB0899_CLKOUT27CFG , 0x7e },
111 { STB0899_STDBYCFG , 0x82 },
112 { STB0899_CS0CFG , 0x82 },
113 { STB0899_CS1CFG , 0x82 },
114 { STB0899_DISEQCOCFG , 0x20 },
115 { STB0899_GPIO32CFG , 0x82 },
116 { STB0899_GPIO33CFG , 0x82 },
117 { STB0899_GPIO34CFG , 0x82 },
118 { STB0899_GPIO35CFG , 0x82 },
119 { STB0899_GPIO36CFG , 0x82 },
120 { STB0899_GPIO37CFG , 0x82 },
121 { STB0899_GPIO38CFG , 0x82 },
122 { STB0899_GPIO39CFG , 0x82 },
123 { STB0899_NCOARSE , 0x17 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
124 { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
125 { STB0899_FILTCTRL , 0x00 },
126 { STB0899_SYSCTRL , 0x01 },
127 { STB0899_STOPCLK1 , 0x20 },
128 { STB0899_STOPCLK2 , 0x00 },
129 { STB0899_INTBUFSTATUS , 0x00 },
130 { STB0899_INTBUFCTRL , 0x0a },
131 { 0xffff , 0xff },
132};
133
134static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
135 { STB0899_DEMOD , 0x00 },
136 { STB0899_RCOMPC , 0xc9 },
137 { STB0899_AGC1CN , 0x01 },
138 { STB0899_AGC1REF , 0x10 },
139 { STB0899_RTC , 0x23 },
140 { STB0899_TMGCFG , 0x4e },
141 { STB0899_AGC2REF , 0x34 },
142 { STB0899_TLSR , 0x84 },
143 { STB0899_CFD , 0xf7 },
144 { STB0899_ACLC , 0x87 },
145 { STB0899_BCLC , 0x94 },
146 { STB0899_EQON , 0x41 },
147 { STB0899_LDT , 0xf1 },
148 { STB0899_LDT2 , 0xe3 },
149 { STB0899_EQUALREF , 0xb4 },
150 { STB0899_TMGRAMP , 0x10 },
151 { STB0899_TMGTHD , 0x30 },
152 { STB0899_IDCCOMP , 0xfd },
153 { STB0899_QDCCOMP , 0xff },
154 { STB0899_POWERI , 0x0c },
155 { STB0899_POWERQ , 0x0f },
156 { STB0899_RCOMP , 0x6c },
157 { STB0899_AGCIQIN , 0x80 },
158 { STB0899_AGC2I1 , 0x06 },
159 { STB0899_AGC2I2 , 0x00 },
160 { STB0899_TLIR , 0x30 },
161 { STB0899_RTF , 0x7f },
162 { STB0899_DSTATUS , 0x00 },
163 { STB0899_LDI , 0xbc },
164 { STB0899_CFRM , 0xea },
165 { STB0899_CFRL , 0x31 },
166 { STB0899_NIRM , 0x2b },
167 { STB0899_NIRL , 0x80 },
168 { STB0899_ISYMB , 0x1d },
169 { STB0899_QSYMB , 0xa6 },
170 { STB0899_SFRH , 0x2f },
171 { STB0899_SFRM , 0x68 },
172 { STB0899_SFRL , 0x40 },
173 { STB0899_SFRUPH , 0x2f },
174 { STB0899_SFRUPM , 0x68 },
175 { STB0899_SFRUPL , 0x40 },
176 { STB0899_EQUAI1 , 0x02 },
177 { STB0899_EQUAQ1 , 0xff },
178 { STB0899_EQUAI2 , 0x04 },
179 { STB0899_EQUAQ2 , 0x05 },
180 { STB0899_EQUAI3 , 0x02 },
181 { STB0899_EQUAQ3 , 0xfd },
182 { STB0899_EQUAI4 , 0x03 },
183 { STB0899_EQUAQ4 , 0x07 },
184 { STB0899_EQUAI5 , 0x08 },
185 { STB0899_EQUAQ5 , 0xf5 },
186 { STB0899_DSTATUS2 , 0x00 },
187 { STB0899_VSTATUS , 0x00 },
188 { STB0899_VERROR , 0x86 },
189 { STB0899_IQSWAP , 0x2a },
190 { STB0899_ECNT1M , 0x00 },
191 { STB0899_ECNT1L , 0x00 },
192 { STB0899_ECNT2M , 0x00 },
193 { STB0899_ECNT2L , 0x00 },
194 { STB0899_ECNT3M , 0x0a },
195 { STB0899_ECNT3L , 0xad },
196 { STB0899_FECAUTO1 , 0x06 },
197 { STB0899_FECM , 0x01 },
198 { STB0899_VTH12 , 0xb0 },
199 { STB0899_VTH23 , 0x7a },
200 { STB0899_VTH34 , 0x58 },
201 { STB0899_VTH56 , 0x38 },
202 { STB0899_VTH67 , 0x34 },
203 { STB0899_VTH78 , 0x24 },
204 { STB0899_PRVIT , 0xff },
205 { STB0899_VITSYNC , 0x19 },
206 { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
207 { STB0899_TSULC , 0x42 },
208 { STB0899_RSLLC , 0x41 },
209 { STB0899_TSLPL , 0x12 },
210 { STB0899_TSCFGH , 0x0c },
211 { STB0899_TSCFGM , 0x00 },
212 { STB0899_TSCFGL , 0x00 },
213 { STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */
214 { STB0899_RSSYNCDEL , 0x00 },
215 { STB0899_TSINHDELH , 0x02 },
216 { STB0899_TSINHDELM , 0x00 },
217 { STB0899_TSINHDELL , 0x00 },
218 { STB0899_TSLLSTKM , 0x1b },
219 { STB0899_TSLLSTKL , 0xb3 },
220 { STB0899_TSULSTKM , 0x00 },
221 { STB0899_TSULSTKL , 0x00 },
222 { STB0899_PCKLENUL , 0xbc },
223 { STB0899_PCKLENLL , 0xcc },
224 { STB0899_RSPCKLEN , 0xbd },
225 { STB0899_TSSTATUS , 0x90 },
226 { STB0899_ERRCTRL1 , 0xb6 },
227 { STB0899_ERRCTRL2 , 0x95 },
228 { STB0899_ERRCTRL3 , 0x8d },
229 { STB0899_DMONMSK1 , 0x27 },
230 { STB0899_DMONMSK0 , 0x03 },
231 { STB0899_DEMAPVIT , 0x5c },
232 { STB0899_PLPARM , 0x19 },
233 { STB0899_PDELCTRL , 0x48 },
234 { STB0899_PDELCTRL2 , 0x00 },
235 { STB0899_BBHCTRL1 , 0x00 },
236 { STB0899_BBHCTRL2 , 0x00 },
237 { STB0899_HYSTTHRESH , 0x77 },
238 { STB0899_MATCSTM , 0x00 },
239 { STB0899_MATCSTL , 0x00 },
240 { STB0899_UPLCSTM , 0x00 },
241 { STB0899_UPLCSTL , 0x00 },
242 { STB0899_DFLCSTM , 0x00 },
243 { STB0899_DFLCSTL , 0x00 },
244 { STB0899_SYNCCST , 0x00 },
245 { STB0899_SYNCDCSTM , 0x00 },
246 { STB0899_SYNCDCSTL , 0x00 },
247 { STB0899_ISI_ENTRY , 0x00 },
248 { STB0899_ISI_BIT_EN , 0x00 },
249 { STB0899_MATSTRM , 0xf0 },
250 { STB0899_MATSTRL , 0x02 },
251 { STB0899_UPLSTRM , 0x45 },
252 { STB0899_UPLSTRL , 0x60 },
253 { STB0899_DFLSTRM , 0xe3 },
254 { STB0899_DFLSTRL , 0x00 },
255 { STB0899_SYNCSTR , 0x47 },
256 { STB0899_SYNCDSTRM , 0x05 },
257 { STB0899_SYNCDSTRL , 0x18 },
258 { STB0899_CFGPDELSTATUS1 , 0x19 },
259 { STB0899_CFGPDELSTATUS2 , 0x2b },
260 { STB0899_BBFERRORM , 0x00 },
261 { STB0899_BBFERRORL , 0x01 },
262 { STB0899_UPKTERRORM , 0x00 },
263 { STB0899_UPKTERRORL , 0x00 },
264 { 0xffff , 0xff },
265};
266
267struct stb0899_config vp1041_stb0899_config = {
268 .init_dev = vp1041_stb0899_s1_init_1,
269 .init_s2_demod = stb0899_s2_init_2,
270 .init_s1_demod = vp1041_stb0899_s1_init_3,
271 .init_s2_fec = stb0899_s2_init_4,
272 .init_tst = stb0899_s1_init_5,
273
274 .demod_address = 0x68, /* 0xd0 >> 1 */
275
276 .xtal_freq = 27000000,
277 .inversion = IQ_SWAP_ON, /* 1 */
278
279 .lo_clk = 76500000,
280 .hi_clk = 99000000,
281
282 .esno_ave = STB0899_DVBS2_ESNO_AVE,
283 .esno_quant = STB0899_DVBS2_ESNO_QUANT,
284 .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
285 .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
286 .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
287 .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
288 .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
289 .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
290 .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
291
292 .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
293 .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
294 .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
295 .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
296
297 .tuner_get_frequency = stb6100_get_frequency,
298 .tuner_set_frequency = stb6100_set_frequency,
299 .tuner_set_bandwidth = stb6100_set_bandwidth,
300 .tuner_get_bandwidth = stb6100_get_bandwidth,
301 .tuner_set_rfsiggain = NULL,
302};
303
304struct stb6100_config vp1041_stb6100_config = {
305 .tuner_address = 0x60,
306 .refclock = 27000000,
307};
308
309static int vp1041_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
310{
311 struct i2c_adapter *adapter = &mantis->adapter;
312
313 int err = 0;
314
315 err = mantis_frontend_power(mantis, POWER_ON);
316 if (err == 0) {
317 mantis_frontend_soft_reset(mantis);
318 msleep(250);
319 mantis->fe = stb0899_attach(&vp1041_stb0899_config, adapter);
320 if (mantis->fe) {
321 dprintk(MANTIS_ERROR, 1,
322 "found STB0899 DVB-S/DVB-S2 frontend @0x%02x",
323 vp1041_stb0899_config.demod_address);
324
325 if (stb6100_attach(mantis->fe, &vp1041_stb6100_config, adapter)) {
326 if (!lnbp21_attach(mantis->fe, adapter, 0, 0))
327 dprintk(MANTIS_ERROR, 1, "No LNBP21 found!");
328 }
329 } else {
330 return -EREMOTEIO;
331 }
332 } else {
333 dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
334 adapter->name,
335 err);
336
337 return -EIO;
338 }
339
340
341 dprintk(MANTIS_ERROR, 1, "Done!");
342
343 return 0;
344}
345
346struct mantis_hwconfig vp1041_config = {
347 .model_name = MANTIS_MODEL_NAME,
348 .dev_type = MANTIS_DEV_TYPE,
349 .ts_size = MANTIS_TS_188,
350
351 .baud_rate = MANTIS_BAUD_9600,
352 .parity = MANTIS_PARITY_NONE,
353 .bytes = 0,
354
355 .frontend_init = vp1041_frontend_init,
356 .power = GPIF_A12,
357 .reset = GPIF_A13,
358};
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.h b/drivers/media/dvb/mantis/mantis_vp1041.h
new file mode 100644
index 000000000000..1ae5b3de8081
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1041.h
@@ -0,0 +1,33 @@
1/*
2 Mantis VP-1041 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP1041_H
22#define __MANTIS_VP1041_H
23
24#include "mantis_common.h"
25
26#define MANTIS_VP_1041_DVB_S2 0x0031
27#define SKYSTAR_HD2_10 0x0001
28#define SKYSTAR_HD2_20 0x0003
29#define CINERGY_S2_PCI_HD 0x1179
30
31extern struct mantis_hwconfig vp1041_config;
32
33#endif /* __MANTIS_VP1041_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp2033.c b/drivers/media/dvb/mantis/mantis_vp2033.c
new file mode 100644
index 000000000000..10ce81790a8c
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2033.c
@@ -0,0 +1,187 @@
1/*
2 Mantis VP-2033 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "tda1002x.h"
32#include "mantis_common.h"
33#include "mantis_ioc.h"
34#include "mantis_dvb.h"
35#include "mantis_vp2033.h"
36
37#define MANTIS_MODEL_NAME "VP-2033"
38#define MANTIS_DEV_TYPE "DVB-C"
39
40struct tda1002x_config vp2033_tda1002x_cu1216_config = {
41 .demod_address = 0x18 >> 1,
42 .invert = 1,
43};
44
45struct tda10023_config vp2033_tda10023_cu1216_config = {
46 .demod_address = 0x18 >> 1,
47 .invert = 1,
48};
49
50static u8 read_pwm(struct mantis_pci *mantis)
51{
52 struct i2c_adapter *adapter = &mantis->adapter;
53
54 u8 b = 0xff;
55 u8 pwm;
56 struct i2c_msg msg[] = {
57 {.addr = 0x50, .flags = 0, .buf = &b, .len = 1},
58 {.addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1}
59 };
60
61 if ((i2c_transfer(adapter, msg, 2) != 2)
62 || (pwm == 0xff))
63 pwm = 0x48;
64
65 return pwm;
66}
67
68static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
69{
70 struct mantis_pci *mantis = fe->dvb->priv;
71 struct i2c_adapter *adapter = &mantis->adapter;
72
73 u8 buf[6];
74 struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)};
75 int i;
76
77#define CU1216_IF 36125000
78#define TUNER_MUL 62500
79
80 u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
81
82 buf[0] = (div >> 8) & 0x7f;
83 buf[1] = div & 0xff;
84 buf[2] = 0xce;
85 buf[3] = (params->frequency < 150000000 ? 0x01 :
86 params->frequency < 445000000 ? 0x02 : 0x04);
87 buf[4] = 0xde;
88 buf[5] = 0x20;
89
90 if (fe->ops.i2c_gate_ctrl)
91 fe->ops.i2c_gate_ctrl(fe, 1);
92
93 if (i2c_transfer(adapter, &msg, 1) != 1)
94 return -EIO;
95
96 /* wait for the pll lock */
97 msg.flags = I2C_M_RD;
98 msg.len = 1;
99 for (i = 0; i < 20; i++) {
100 if (fe->ops.i2c_gate_ctrl)
101 fe->ops.i2c_gate_ctrl(fe, 1);
102
103 if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40))
104 break;
105
106 msleep(10);
107 }
108
109 /* switch the charge pump to the lower current */
110 msg.flags = 0;
111 msg.len = 2;
112 msg.buf = &buf[2];
113 buf[2] &= ~0x40;
114 if (fe->ops.i2c_gate_ctrl)
115 fe->ops.i2c_gate_ctrl(fe, 1);
116
117 if (i2c_transfer(adapter, &msg, 1) != 1)
118 return -EIO;
119
120 return 0;
121}
122
123static int vp2033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
124{
125 struct i2c_adapter *adapter = &mantis->adapter;
126
127 int err = 0;
128
129 err = mantis_frontend_power(mantis, POWER_ON);
130 if (err == 0) {
131 mantis_frontend_soft_reset(mantis);
132 msleep(250);
133
134 dprintk(MANTIS_ERROR, 1, "Probing for CU1216 (DVB-C)");
135 fe = tda10021_attach(&vp2033_tda1002x_cu1216_config,
136 adapter,
137 read_pwm(mantis));
138
139 if (fe) {
140 dprintk(MANTIS_ERROR, 1,
141 "found Philips CU1216 DVB-C frontend (TDA10021) @ 0x%02x",
142 vp2033_tda1002x_cu1216_config.demod_address);
143 } else {
144 fe = tda10023_attach(&vp2033_tda10023_cu1216_config,
145 adapter,
146 read_pwm(mantis));
147
148 if (fe) {
149 dprintk(MANTIS_ERROR, 1,
150 "found Philips CU1216 DVB-C frontend (TDA10023) @ 0x%02x",
151 vp2033_tda1002x_cu1216_config.demod_address);
152 }
153 }
154
155 if (fe) {
156 fe->ops.tuner_ops.set_params = tda1002x_cu1216_tuner_set;
157 dprintk(MANTIS_ERROR, 1, "Mantis DVB-C Philips CU1216 frontend attach success");
158 } else {
159 return -1;
160 }
161 } else {
162 dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
163 adapter->name,
164 err);
165
166 return -EIO;
167 }
168
169 mantis->fe = fe;
170 dprintk(MANTIS_DEBUG, 1, "Done!");
171
172 return 0;
173}
174
175struct mantis_hwconfig vp2033_config = {
176 .model_name = MANTIS_MODEL_NAME,
177 .dev_type = MANTIS_DEV_TYPE,
178 .ts_size = MANTIS_TS_204,
179
180 .baud_rate = MANTIS_BAUD_9600,
181 .parity = MANTIS_PARITY_NONE,
182 .bytes = 0,
183
184 .frontend_init = vp2033_frontend_init,
185 .power = GPIF_A12,
186 .reset = GPIF_A13,
187};
diff --git a/drivers/media/dvb/mantis/mantis_vp2033.h b/drivers/media/dvb/mantis/mantis_vp2033.h
new file mode 100644
index 000000000000..c55242b79d54
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2033.h
@@ -0,0 +1,30 @@
1/*
2 Mantis VP-2033 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP2033_H
22#define __MANTIS_VP2033_H
23
24#include "mantis_common.h"
25
26#define MANTIS_VP_2033_DVB_C 0x0008
27
28extern struct mantis_hwconfig vp2033_config;
29
30#endif /* __MANTIS_VP2033_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp2040.c b/drivers/media/dvb/mantis/mantis_vp2040.c
new file mode 100644
index 000000000000..a7ca233e800b
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2040.c
@@ -0,0 +1,186 @@
1/*
2 Mantis VP-2040 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "tda1002x.h"
32#include "mantis_common.h"
33#include "mantis_ioc.h"
34#include "mantis_dvb.h"
35#include "mantis_vp2040.h"
36
37#define MANTIS_MODEL_NAME "VP-2040"
38#define MANTIS_DEV_TYPE "DVB-C"
39
40struct tda1002x_config vp2040_tda1002x_cu1216_config = {
41 .demod_address = 0x18 >> 1,
42 .invert = 1,
43};
44
45struct tda10023_config vp2040_tda10023_cu1216_config = {
46 .demod_address = 0x18 >> 1,
47 .invert = 1,
48};
49
50static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
51{
52 struct mantis_pci *mantis = fe->dvb->priv;
53 struct i2c_adapter *adapter = &mantis->adapter;
54
55 u8 buf[6];
56 struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)};
57 int i;
58
59#define CU1216_IF 36125000
60#define TUNER_MUL 62500
61
62 u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
63
64 buf[0] = (div >> 8) & 0x7f;
65 buf[1] = div & 0xff;
66 buf[2] = 0xce;
67 buf[3] = (params->frequency < 150000000 ? 0x01 :
68 params->frequency < 445000000 ? 0x02 : 0x04);
69 buf[4] = 0xde;
70 buf[5] = 0x20;
71
72 if (fe->ops.i2c_gate_ctrl)
73 fe->ops.i2c_gate_ctrl(fe, 1);
74
75 if (i2c_transfer(adapter, &msg, 1) != 1)
76 return -EIO;
77
78 /* wait for the pll lock */
79 msg.flags = I2C_M_RD;
80 msg.len = 1;
81 for (i = 0; i < 20; i++) {
82 if (fe->ops.i2c_gate_ctrl)
83 fe->ops.i2c_gate_ctrl(fe, 1);
84
85 if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40))
86 break;
87
88 msleep(10);
89 }
90
91 /* switch the charge pump to the lower current */
92 msg.flags = 0;
93 msg.len = 2;
94 msg.buf = &buf[2];
95 buf[2] &= ~0x40;
96 if (fe->ops.i2c_gate_ctrl)
97 fe->ops.i2c_gate_ctrl(fe, 1);
98
99 if (i2c_transfer(adapter, &msg, 1) != 1)
100 return -EIO;
101
102 return 0;
103}
104
105static u8 read_pwm(struct mantis_pci *mantis)
106{
107 struct i2c_adapter *adapter = &mantis->adapter;
108
109 u8 b = 0xff;
110 u8 pwm;
111 struct i2c_msg msg[] = {
112 {.addr = 0x50, .flags = 0, .buf = &b, .len = 1},
113 {.addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1}
114 };
115
116 if ((i2c_transfer(adapter, msg, 2) != 2)
117 || (pwm == 0xff))
118 pwm = 0x48;
119
120 return pwm;
121}
122
123static int vp2040_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
124{
125 struct i2c_adapter *adapter = &mantis->adapter;
126
127 int err = 0;
128
129 err = mantis_frontend_power(mantis, POWER_ON);
130 if (err == 0) {
131 mantis_frontend_soft_reset(mantis);
132 msleep(250);
133
134 dprintk(MANTIS_ERROR, 1, "Probing for CU1216 (DVB-C)");
135 fe = tda10021_attach(&vp2040_tda1002x_cu1216_config,
136 adapter,
137 read_pwm(mantis));
138
139 if (fe) {
140 dprintk(MANTIS_ERROR, 1,
141 "found Philips CU1216 DVB-C frontend (TDA10021) @ 0x%02x",
142 vp2040_tda1002x_cu1216_config.demod_address);
143 } else {
144 fe = tda10023_attach(&vp2040_tda10023_cu1216_config,
145 adapter,
146 read_pwm(mantis));
147
148 if (fe) {
149 dprintk(MANTIS_ERROR, 1,
150 "found Philips CU1216 DVB-C frontend (TDA10023) @ 0x%02x",
151 vp2040_tda1002x_cu1216_config.demod_address);
152 }
153 }
154
155 if (fe) {
156 fe->ops.tuner_ops.set_params = tda1002x_cu1216_tuner_set;
157 dprintk(MANTIS_ERROR, 1, "Mantis DVB-C Philips CU1216 frontend attach success");
158 } else {
159 return -1;
160 }
161 } else {
162 dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
163 adapter->name,
164 err);
165
166 return -EIO;
167 }
168 mantis->fe = fe;
169 dprintk(MANTIS_DEBUG, 1, "Done!");
170
171 return 0;
172}
173
174struct mantis_hwconfig vp2040_config = {
175 .model_name = MANTIS_MODEL_NAME,
176 .dev_type = MANTIS_DEV_TYPE,
177 .ts_size = MANTIS_TS_204,
178
179 .baud_rate = MANTIS_BAUD_9600,
180 .parity = MANTIS_PARITY_NONE,
181 .bytes = 0,
182
183 .frontend_init = vp2040_frontend_init,
184 .power = GPIF_A12,
185 .reset = GPIF_A13,
186};
diff --git a/drivers/media/dvb/mantis/mantis_vp2040.h b/drivers/media/dvb/mantis/mantis_vp2040.h
new file mode 100644
index 000000000000..d125e219b685
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2040.h
@@ -0,0 +1,32 @@
1/*
2 Mantis VP-2040 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP2040_H
22#define __MANTIS_VP2040_H
23
24#include "mantis_common.h"
25
26#define MANTIS_VP_2040_DVB_C 0x0043
27#define CINERGY_C 0x1178
28#define CABLESTAR_HD2 0x0002
29
30extern struct mantis_hwconfig vp2040_config;
31
32#endif /* __MANTIS_VP2040_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp3028.c b/drivers/media/dvb/mantis/mantis_vp3028.c
new file mode 100644
index 000000000000..4155c838a18a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3028.c
@@ -0,0 +1,38 @@
1/*
2 Mantis VP-3028 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include "mantis_common.h"
22#include "mantis_vp3028.h"
23
24struct zl10353_config mantis_vp3028_config = {
25 .demod_address = 0x0f,
26};
27
28#define MANTIS_MODEL_NAME "VP-3028"
29#define MANTIS_DEV_TYPE "DVB-T"
30
31struct mantis_hwconfig vp3028_mantis_config = {
32 .model_name = MANTIS_MODEL_NAME,
33 .dev_type = MANTIS_DEV_TYPE,
34 .ts_size = MANTIS_TS_188,
35 .baud_rate = MANTIS_BAUD_9600,
36 .parity = MANTIS_PARITY_NONE,
37 .bytes = 0,
38};
diff --git a/drivers/media/dvb/mantis/mantis_vp3028.h b/drivers/media/dvb/mantis/mantis_vp3028.h
new file mode 100644
index 000000000000..b07be6adc522
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3028.h
@@ -0,0 +1,33 @@
1/*
2 Mantis VP-3028 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP3028_H
22#define __MANTIS_VP3028_H
23
24#include "dvb_frontend.h"
25#include "mantis_common.h"
26#include "zl10353.h"
27
28#define MANTIS_VP_3028_DVB_T 0x0028
29
30extern struct zl10353_config mantis_vp3028_config;
31extern struct mantis_hwconfig vp3028_mantis_config;
32
33#endif /* __MANTIS_VP3028_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp3030.c b/drivers/media/dvb/mantis/mantis_vp3030.c
new file mode 100644
index 000000000000..1f4334214953
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3030.c
@@ -0,0 +1,105 @@
1/*
2 Mantis VP-3030 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29#include "dvb_net.h"
30
31#include "zl10353.h"
32#include "tda665x.h"
33#include "mantis_common.h"
34#include "mantis_ioc.h"
35#include "mantis_dvb.h"
36#include "mantis_vp3030.h"
37
38struct zl10353_config mantis_vp3030_config = {
39 .demod_address = 0x0f,
40};
41
42struct tda665x_config env57h12d5_config = {
43 .name = "ENV57H12D5 (ET-50DT)",
44 .addr = 0x60,
45 .frequency_min = 47000000,
46 .frequency_max = 862000000,
47 .frequency_offst = 3616667,
48 .ref_multiplier = 6, /* 1/6 MHz */
49 .ref_divider = 100000, /* 1/6 MHz */
50};
51
52#define MANTIS_MODEL_NAME "VP-3030"
53#define MANTIS_DEV_TYPE "DVB-T"
54
55
56static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
57{
58 struct i2c_adapter *adapter = &mantis->adapter;
59 struct mantis_hwconfig *config = mantis->hwconfig;
60 int err = 0;
61
62 gpio_set_bits(mantis, config->reset, 0);
63 msleep(100);
64 err = mantis_frontend_power(mantis, POWER_ON);
65 msleep(100);
66 gpio_set_bits(mantis, config->reset, 1);
67
68 if (err == 0) {
69 msleep(250);
70 dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
71 fe = zl10353_attach(&mantis_vp3030_config, adapter);
72
73 if (!fe)
74 return -1;
75
76 tda665x_attach(fe, &env57h12d5_config, adapter);
77 } else {
78 dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
79 adapter->name,
80 err);
81
82 return -EIO;
83
84 }
85 mantis->fe = fe;
86 dprintk(MANTIS_ERROR, 1, "Done!");
87
88 return 0;
89}
90
91struct mantis_hwconfig vp3030_config = {
92 .model_name = MANTIS_MODEL_NAME,
93 .dev_type = MANTIS_DEV_TYPE,
94 .ts_size = MANTIS_TS_188,
95
96 .baud_rate = MANTIS_BAUD_9600,
97 .parity = MANTIS_PARITY_NONE,
98 .bytes = 0,
99
100 .frontend_init = vp3030_frontend_init,
101 .power = GPIF_A12,
102 .reset = GPIF_A13,
103
104 .i2c_mode = MANTIS_BYTE_MODE
105};
diff --git a/drivers/media/dvb/mantis/mantis_vp3030.h b/drivers/media/dvb/mantis/mantis_vp3030.h
new file mode 100644
index 000000000000..5f12c4266277
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3030.h
@@ -0,0 +1,30 @@
1/*
2 Mantis VP-3030 driver
3
4 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __MANTIS_VP3030_H
22#define __MANTIS_VP3030_H
23
24#include "mantis_common.h"
25
26#define MANTIS_VP_3030_DVB_T 0x0024
27
28extern struct mantis_hwconfig vp3030_config;
29
30#endif /* __MANTIS_VP3030_H */
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index e930a67d526b..bd6214d4ab3b 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1815,6 +1815,8 @@ static int vidioc_qbuf(struct file *file, void *priv,
 	/* put the buffer in the 'queued' queue */
 	i = gspca_dev->fr_q;
 	gspca_dev->fr_queue[i] = index;
+	if (gspca_dev->fr_i == i)
+		gspca_dev->cur_frame = frame;
 	gspca_dev->fr_q = (i + 1) % gspca_dev->nframes;
 	PDEBUG(D_FRAM, "qbuf q:%d i:%d o:%d",
 		gspca_dev->fr_q,
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index aa2f3c7e2cb5..1b536f7d30cf 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -48,6 +48,12 @@ static
 		DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xa 2528")
 		}
 	}, {
+		.ident = "Fujitsu-Siemens Amilo Xi 2428",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 2428")
+		}
+	}, {
 		.ident = "Fujitsu-Siemens Amilo Xi 2528",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 4dbb882c83dc..0a6b8f07a69d 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -1533,7 +1533,7 @@ static void setexposure_96(struct gspca_dev *gspca_dev)
 static void setsharpness_96(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
-	u8 val;
+	s8 val;
 
 	val = sd->sharpness;
 	if (val < 0) {		/* auto */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 4cff8035614f..0ca1c06652b1 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2319,7 +2319,7 @@ static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum)
 		}
 	}
 	if (avg_lum > MAX_AVG_LUM) {
-		if (sd->gain - 1 >= 0) {
+		if (sd->gain >= 1) {
 			sd->gain--;
 			set_gain(gspca_dev);
 		}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index 487d40555343..96c61926d372 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -228,6 +228,7 @@ static const struct stv_init stv_bridge_init[] = {
 	/* This reg is written twice. Some kind of reset? */
 	{NULL, 0x1620, 0x80},
 	{NULL, 0x1620, 0x00},
+	{NULL, 0x1443, 0x00},
 	{NULL, 0x1423, 0x04},
 	{x1500, 0x1500, ARRAY_SIZE(x1500)},
 	{x1536, 0x1536, ARRAY_SIZE(x1536)},
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 716df6b15fc5..306b7d75b4aa 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -709,7 +709,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
 		spca504B_PollingDataReady(gspca_dev);
 
 		/* Init the cam width height with some values get on init ? */
-		reg_w_riv(gspca_dev, 0x31, 0, 0x04);
+		reg_w_riv(gspca_dev, 0x31, 0x0004, 0x00);
 		spca504B_WaitCmdStatus(gspca_dev);
 		spca504B_PollingDataReady(gspca_dev);
 		break;
@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
 	default:
 /* case BRIDGE_SPCA533: */
 /* case BRIDGE_SPCA504B: */
-		reg_w_riv(gspca_dev, 0, 0x00, 0x21ad);	/* hue */
-		reg_w_riv(gspca_dev, 0, 0x01, 0x21ac);	/* sat/hue */
-		reg_w_riv(gspca_dev, 0, 0x00, 0x21a3);	/* gamma */
+		reg_w_riv(gspca_dev, 0, 0x21ad, 0x00);	/* hue */
+		reg_w_riv(gspca_dev, 0, 0x21ac, 0x01);	/* sat/hue */
+		reg_w_riv(gspca_dev, 0, 0x21a3, 0x00);	/* gamma */
 		break;
 	case BRIDGE_SPCA536:
-		reg_w_riv(gspca_dev, 0, 0x40, 0x20f5);
-		reg_w_riv(gspca_dev, 0, 0x01, 0x20f4);
-		reg_w_riv(gspca_dev, 0, 0x00, 0x2089);
+		reg_w_riv(gspca_dev, 0, 0x20f5, 0x40);
+		reg_w_riv(gspca_dev, 0, 0x20f4, 0x01);
+		reg_w_riv(gspca_dev, 0, 0x2089, 0x00);
 		break;
 	}
 	if (pollreg)
820 if (pollreg) 820 if (pollreg)
@@ -887,11 +887,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
 	switch (sd->bridge) {
 	case BRIDGE_SPCA504B:
 		reg_w_riv(gspca_dev, 0x1d, 0x00, 0);
-		reg_w_riv(gspca_dev, 0, 0x01, 0x2306);
-		reg_w_riv(gspca_dev, 0, 0x00, 0x0d04);
-		reg_w_riv(gspca_dev, 0, 0x00, 0x2000);
-		reg_w_riv(gspca_dev, 0, 0x13, 0x2301);
-		reg_w_riv(gspca_dev, 0, 0x00, 0x2306);
+		reg_w_riv(gspca_dev, 0x00, 0x2306, 0x01);
+		reg_w_riv(gspca_dev, 0x00, 0x0d04, 0x00);
+		reg_w_riv(gspca_dev, 0x00, 0x2000, 0x00);
+		reg_w_riv(gspca_dev, 0x00, 0x2301, 0x13);
+		reg_w_riv(gspca_dev, 0x00, 0x2306, 0x00);
 		/* fall thru */
 	case BRIDGE_SPCA533:
 		spca504B_PollingDataReady(gspca_dev);
@@ -1000,7 +1000,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
 		spca504B_WaitCmdStatus(gspca_dev);
 		break;
 	default:
-		reg_w_riv(gspca_dev, 0x31, 0, 0x04);
+		reg_w_riv(gspca_dev, 0x31, 0x0004, 0x00);
 		spca504B_WaitCmdStatus(gspca_dev);
 		spca504B_PollingDataReady(gspca_dev);
 		break;
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index c090efcd8045..71921c878424 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -3009,6 +3009,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
3009 int l; 3009 int l;
3010 3010
3011 frame = gspca_get_i_frame(gspca_dev); 3011 frame = gspca_get_i_frame(gspca_dev);
3012 if (frame == NULL) {
3013 gspca_dev->last_packet_type = DISCARD_PACKET;
3014 return;
3015 }
3012 l = frame->data_end - frame->data; 3016 l = frame->data_end - frame->data;
3013 if (len > frame->v4l2_buf.length - l) 3017 if (len > frame->v4l2_buf.length - l)
3014 len = frame->v4l2_buf.length - l; 3018 len = frame->v4l2_buf.length - l;
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 2ba14fb5b031..c167cc3de492 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -718,7 +718,7 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
718 718
719 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 719 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
720 irq = platform_get_irq(pdev, 0); 720 irq = platform_get_irq(pdev, 0);
721 if (!res || !irq) { 721 if (!res || (int)irq <= 0) {
722 err = -ENODEV; 722 err = -ENODEV;
723 goto exit; 723 goto exit;
724 } 724 }
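Note on the probe fix above (and the matching sh_mobile_ceu_camera.c hunk further down): platform_get_irq() reports failure with a negative errno, so testing the result with `!irq` misses the error case; the patch range-checks the value instead. A minimal sketch of the corrected pattern, with a hypothetical driver name and trimmed body:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int example_camera_probe(struct platform_device *pdev)
{
	struct resource *res;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* platform_get_irq() returns a negative errno on failure, so the
	 * result must be range-checked, not just tested for zero. */
	irq = platform_get_irq(pdev, 0);
	if (!res || irq <= 0)
		return -ENODEV;

	/* ... map registers, request the IRQ, register the device ... */
	return 0;
}

In the drivers touched by the patch the irq variable is unsigned, hence the `(int)irq <= 0` cast in the hunks; the sketch uses a plain int so no cast is needed.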
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 7e42989ce0e4..805226e0d9c1 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -563,7 +563,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
563 struct i2c_client *client = sd->priv; 563 struct i2c_client *client = sd->priv;
564 struct rj54n1 *rj54n1 = to_rj54n1(client); 564 struct rj54n1 *rj54n1 = to_rj54n1(client);
565 struct v4l2_rect *rect = &a->c; 565 struct v4l2_rect *rect = &a->c;
566 unsigned int dummy, output_w, output_h, 566 unsigned int dummy = 0, output_w, output_h,
567 input_w = rect->width, input_h = rect->height; 567 input_w = rect->width, input_h = rect->height;
568 int ret; 568 int ret;
569 569
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 9f85e917f9f3..a7ad7810fddc 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -420,19 +420,6 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
420 ctrl |= SAA7134_MAIN_CTRL_TE5; 420 ctrl |= SAA7134_MAIN_CTRL_TE5;
421 irq |= SAA7134_IRQ1_INTE_RA2_1 | 421 irq |= SAA7134_IRQ1_INTE_RA2_1 |
422 SAA7134_IRQ1_INTE_RA2_0; 422 SAA7134_IRQ1_INTE_RA2_0;
423
424 /* dma: setup channel 5 (= TS) */
425
426 saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
427 saa_writeb(SAA7134_TS_DMA1,
428 ((dev->ts.nr_packets - 1) >> 8) & 0xff);
429 /* TSNOPIT=0, TSCOLAP=0 */
430 saa_writeb(SAA7134_TS_DMA2,
431 (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
432 saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
433 saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
434 SAA7134_RS_CONTROL_ME |
435 (dev->ts.pt_ts.dma >> 12));
436 } 423 }
437 424
438 /* set task conditions + field handling */ 425 /* set task conditions + field handling */
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 7dfecfc6017c..ee5bff02a92c 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -93,9 +93,9 @@ static int ts_open(struct file *file)
93 dprintk("open dev=%s\n", video_device_node_name(vdev)); 93 dprintk("open dev=%s\n", video_device_node_name(vdev));
94 err = -EBUSY; 94 err = -EBUSY;
95 if (!mutex_trylock(&dev->empress_tsq.vb_lock)) 95 if (!mutex_trylock(&dev->empress_tsq.vb_lock))
96 goto done; 96 return err;
97 if (atomic_read(&dev->empress_users)) 97 if (atomic_read(&dev->empress_users))
98 goto done_up; 98 goto done;
99 99
100 /* Unmute audio */ 100 /* Unmute audio */
101 saa_writeb(SAA7134_AUDIO_MUTE_CTRL, 101 saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
@@ -105,10 +105,8 @@ static int ts_open(struct file *file)
105 file->private_data = dev; 105 file->private_data = dev;
106 err = 0; 106 err = 0;
107 107
108done_up:
109 mutex_unlock(&dev->empress_tsq.vb_lock);
110done: 108done:
111 unlock_kernel(); 109 mutex_unlock(&dev->empress_tsq.vb_lock);
112 return err; 110 return err;
113} 111}
114 112
diff --git a/drivers/media/video/saa7134/saa7134-ts.c b/drivers/media/video/saa7134/saa7134-ts.c
index 03488ba4c99c..b9817d74943f 100644
--- a/drivers/media/video/saa7134/saa7134-ts.c
+++ b/drivers/media/video/saa7134/saa7134-ts.c
@@ -250,6 +250,19 @@ int saa7134_ts_start(struct saa7134_dev *dev)
250 250
251 BUG_ON(dev->ts_started); 251 BUG_ON(dev->ts_started);
252 252
253 /* dma: setup channel 5 (= TS) */
254 saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
255 saa_writeb(SAA7134_TS_DMA1,
256 ((dev->ts.nr_packets - 1) >> 8) & 0xff);
257 /* TSNOPIT=0, TSCOLAP=0 */
258 saa_writeb(SAA7134_TS_DMA2,
259 (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
260 saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
261 saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
262 SAA7134_RS_CONTROL_ME |
263 (dev->ts.pt_ts.dma >> 12));
264
265 /* reset hardware TS buffers */
253 saa_writeb(SAA7134_TS_SERIAL1, 0x00); 266 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
254 saa_writeb(SAA7134_TS_SERIAL1, 0x03); 267 saa_writeb(SAA7134_TS_SERIAL1, 0x03);
255 saa_writeb(SAA7134_TS_SERIAL1, 0x00); 268 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index d69363f0d8c9..f09c7140d6b2 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -1827,7 +1827,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
1827 1827
1828 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1828 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1829 irq = platform_get_irq(pdev, 0); 1829 irq = platform_get_irq(pdev, 0);
1830 if (!res || !irq) { 1830 if (!res || (int)irq <= 0) {
1831 dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); 1831 dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
1832 err = -ENODEV; 1832 err = -ENODEV;
1833 goto exit; 1833 goto exit;
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 0469d7a876a8..ec8ef8c5560a 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1393,7 +1393,7 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
1393 size = entity->processing.bControlSize; 1393 size = entity->processing.bControlSize;
1394 1394
1395 for (i = 0; i < ARRAY_SIZE(blacklist); ++i) { 1395 for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
1396 if (!usb_match_id(dev->intf, &blacklist[i].id)) 1396 if (!usb_match_one_id(dev->intf, &blacklist[i].id))
1397 continue; 1397 continue;
1398 1398
1399 if (blacklist[i].index >= 8 * size || 1399 if (blacklist[i].index >= 8 * size ||
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index f854698c4061..ea11839cba4a 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -59,9 +59,9 @@
59 * returns immediately. 59 * returns immediately.
60 * 60 *
61 * When the buffer is full, the completion handler removes it from the irq 61 * When the buffer is full, the completion handler removes it from the irq
62 * queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue. 62 * queue, marks it as done (UVC_BUF_STATE_DONE) and wakes its wait queue.
63 * At that point, any process waiting on the buffer will be woken up. If a 63 * At that point, any process waiting on the buffer will be woken up. If a
64 * process tries to dequeue a buffer after it has been marked ready, the 64 * process tries to dequeue a buffer after it has been marked done, the
65 * dequeing will succeed immediately. 65 * dequeing will succeed immediately.
66 * 66 *
67 * 2. Buffers are queued, user is waiting on a buffer and the device gets 67 * 2. Buffers are queued, user is waiting on a buffer and the device gets
@@ -201,6 +201,7 @@ static void __uvc_query_buffer(struct uvc_buffer *buf,
201 break; 201 break;
202 case UVC_BUF_STATE_QUEUED: 202 case UVC_BUF_STATE_QUEUED:
203 case UVC_BUF_STATE_ACTIVE: 203 case UVC_BUF_STATE_ACTIVE:
204 case UVC_BUF_STATE_READY:
204 v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED; 205 v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
205 break; 206 break;
206 case UVC_BUF_STATE_IDLE: 207 case UVC_BUF_STATE_IDLE:
@@ -295,13 +296,15 @@ static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
295{ 296{
296 if (nonblocking) { 297 if (nonblocking) {
297 return (buf->state != UVC_BUF_STATE_QUEUED && 298 return (buf->state != UVC_BUF_STATE_QUEUED &&
298 buf->state != UVC_BUF_STATE_ACTIVE) 299 buf->state != UVC_BUF_STATE_ACTIVE &&
300 buf->state != UVC_BUF_STATE_READY)
299 ? 0 : -EAGAIN; 301 ? 0 : -EAGAIN;
300 } 302 }
301 303
302 return wait_event_interruptible(buf->wait, 304 return wait_event_interruptible(buf->wait,
303 buf->state != UVC_BUF_STATE_QUEUED && 305 buf->state != UVC_BUF_STATE_QUEUED &&
304 buf->state != UVC_BUF_STATE_ACTIVE); 306 buf->state != UVC_BUF_STATE_ACTIVE &&
307 buf->state != UVC_BUF_STATE_READY);
305} 308}
306 309
307/* 310/*
@@ -348,6 +351,7 @@ int uvc_dequeue_buffer(struct uvc_video_queue *queue,
348 case UVC_BUF_STATE_IDLE: 351 case UVC_BUF_STATE_IDLE:
349 case UVC_BUF_STATE_QUEUED: 352 case UVC_BUF_STATE_QUEUED:
350 case UVC_BUF_STATE_ACTIVE: 353 case UVC_BUF_STATE_ACTIVE:
354 case UVC_BUF_STATE_READY:
351 default: 355 default:
352 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u " 356 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
353 "(driver bug?).\n", buf->state); 357 "(driver bug?).\n", buf->state);
@@ -489,6 +493,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
489 493
490 spin_lock_irqsave(&queue->irqlock, flags); 494 spin_lock_irqsave(&queue->irqlock, flags);
491 list_del(&buf->queue); 495 list_del(&buf->queue);
496 buf->state = UVC_BUF_STATE_DONE;
492 if (!list_empty(&queue->irqqueue)) 497 if (!list_empty(&queue->irqqueue))
493 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, 498 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
494 queue); 499 queue);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 9a9802830d41..7dcf534a0cf3 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -441,7 +441,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
441 if (fid != stream->last_fid && buf->buf.bytesused != 0) { 441 if (fid != stream->last_fid && buf->buf.bytesused != 0) {
442 uvc_trace(UVC_TRACE_FRAME, "Frame complete (FID bit " 442 uvc_trace(UVC_TRACE_FRAME, "Frame complete (FID bit "
443 "toggled).\n"); 443 "toggled).\n");
444 buf->state = UVC_BUF_STATE_DONE; 444 buf->state = UVC_BUF_STATE_READY;
445 return -EAGAIN; 445 return -EAGAIN;
446 } 446 }
447 447
@@ -470,7 +470,7 @@ static void uvc_video_decode_data(struct uvc_streaming *stream,
470 /* Complete the current frame if the buffer size was exceeded. */ 470 /* Complete the current frame if the buffer size was exceeded. */
471 if (len > maxlen) { 471 if (len > maxlen) {
472 uvc_trace(UVC_TRACE_FRAME, "Frame complete (overflow).\n"); 472 uvc_trace(UVC_TRACE_FRAME, "Frame complete (overflow).\n");
473 buf->state = UVC_BUF_STATE_DONE; 473 buf->state = UVC_BUF_STATE_READY;
474 } 474 }
475} 475}
476 476
@@ -482,7 +482,7 @@ static void uvc_video_decode_end(struct uvc_streaming *stream,
482 uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n"); 482 uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n");
483 if (data[0] == len) 483 if (data[0] == len)
484 uvc_trace(UVC_TRACE_FRAME, "EOF in empty payload.\n"); 484 uvc_trace(UVC_TRACE_FRAME, "EOF in empty payload.\n");
485 buf->state = UVC_BUF_STATE_DONE; 485 buf->state = UVC_BUF_STATE_READY;
486 if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID) 486 if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID)
487 stream->last_fid ^= UVC_STREAM_FID; 487 stream->last_fid ^= UVC_STREAM_FID;
488 } 488 }
@@ -568,8 +568,7 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
568 uvc_video_decode_end(stream, buf, mem, 568 uvc_video_decode_end(stream, buf, mem,
569 urb->iso_frame_desc[i].actual_length); 569 urb->iso_frame_desc[i].actual_length);
570 570
571 if (buf->state == UVC_BUF_STATE_DONE || 571 if (buf->state == UVC_BUF_STATE_READY)
572 buf->state == UVC_BUF_STATE_ERROR)
573 buf = uvc_queue_next_buffer(&stream->queue, buf); 572 buf = uvc_queue_next_buffer(&stream->queue, buf);
574 } 573 }
575} 574}
@@ -627,8 +626,7 @@ static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
627 if (!stream->bulk.skip_payload && buf != NULL) { 626 if (!stream->bulk.skip_payload && buf != NULL) {
628 uvc_video_decode_end(stream, buf, stream->bulk.header, 627 uvc_video_decode_end(stream, buf, stream->bulk.header,
629 stream->bulk.payload_size); 628 stream->bulk.payload_size);
630 if (buf->state == UVC_BUF_STATE_DONE || 629 if (buf->state == UVC_BUF_STATE_READY)
631 buf->state == UVC_BUF_STATE_ERROR)
632 buf = uvc_queue_next_buffer(&stream->queue, 630 buf = uvc_queue_next_buffer(&stream->queue,
633 buf); 631 buf);
634 } 632 }
@@ -669,7 +667,7 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
669 stream->bulk.payload_size == stream->bulk.max_payload_size) { 667 stream->bulk.payload_size == stream->bulk.max_payload_size) {
670 if (buf->buf.bytesused == stream->queue.buf_used) { 668 if (buf->buf.bytesused == stream->queue.buf_used) {
671 stream->queue.buf_used = 0; 669 stream->queue.buf_used = 0;
672 buf->state = UVC_BUF_STATE_DONE; 670 buf->state = UVC_BUF_STATE_READY;
673 uvc_queue_next_buffer(&stream->queue, buf); 671 uvc_queue_next_buffer(&stream->queue, buf);
674 stream->last_fid ^= UVC_STREAM_FID; 672 stream->last_fid ^= UVC_STREAM_FID;
675 } 673 }
@@ -924,10 +922,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
924static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags) 922static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
925{ 923{
926 struct usb_interface *intf = stream->intf; 924 struct usb_interface *intf = stream->intf;
927 struct usb_host_interface *alts; 925 struct usb_host_endpoint *ep;
928 struct usb_host_endpoint *ep = NULL; 926 unsigned int i;
929 int intfnum = stream->intfnum;
930 unsigned int bandwidth, psize, i;
931 int ret; 927 int ret;
932 928
933 stream->last_fid = -1; 929 stream->last_fid = -1;
@@ -936,6 +932,12 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
936 stream->bulk.payload_size = 0; 932 stream->bulk.payload_size = 0;
937 933
938 if (intf->num_altsetting > 1) { 934 if (intf->num_altsetting > 1) {
935 struct usb_host_endpoint *best_ep = NULL;
936 unsigned int best_psize = 3 * 1024;
937 unsigned int bandwidth;
938 unsigned int uninitialized_var(altsetting);
939 int intfnum = stream->intfnum;
940
939 /* Isochronous endpoint, select the alternate setting. */ 941 /* Isochronous endpoint, select the alternate setting. */
940 bandwidth = stream->ctrl.dwMaxPayloadTransferSize; 942 bandwidth = stream->ctrl.dwMaxPayloadTransferSize;
941 943
@@ -949,6 +951,9 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
949 } 951 }
950 952
951 for (i = 0; i < intf->num_altsetting; ++i) { 953 for (i = 0; i < intf->num_altsetting; ++i) {
954 struct usb_host_interface *alts;
955 unsigned int psize;
956
952 alts = &intf->altsetting[i]; 957 alts = &intf->altsetting[i];
953 ep = uvc_find_endpoint(alts, 958 ep = uvc_find_endpoint(alts,
954 stream->header.bEndpointAddress); 959 stream->header.bEndpointAddress);
@@ -958,21 +963,27 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
958 /* Check if the bandwidth is high enough. */ 963 /* Check if the bandwidth is high enough. */
959 psize = le16_to_cpu(ep->desc.wMaxPacketSize); 964 psize = le16_to_cpu(ep->desc.wMaxPacketSize);
960 psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3)); 965 psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
961 if (psize >= bandwidth) 966 if (psize >= bandwidth && psize <= best_psize) {
962 break; 967 altsetting = i;
968 best_psize = psize;
969 best_ep = ep;
970 }
963 } 971 }
964 972
965 if (i >= intf->num_altsetting) { 973 if (best_ep == NULL) {
966 uvc_trace(UVC_TRACE_VIDEO, "No fast enough alt setting " 974 uvc_trace(UVC_TRACE_VIDEO, "No fast enough alt setting "
967 "for requested bandwidth.\n"); 975 "for requested bandwidth.\n");
968 return -EIO; 976 return -EIO;
969 } 977 }
970 978
971 ret = usb_set_interface(stream->dev->udev, intfnum, i); 979 uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u "
980 "(%u B/frame bandwidth).\n", altsetting, best_psize);
981
982 ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
972 if (ret < 0) 983 if (ret < 0)
973 return ret; 984 return ret;
974 985
975 ret = uvc_init_video_isoc(stream, ep, gfp_flags); 986 ret = uvc_init_video_isoc(stream, best_ep, gfp_flags);
976 } else { 987 } else {
977 /* Bulk endpoint, proceed to URB initialization. */ 988 /* Bulk endpoint, proceed to URB initialization. */
978 ep = uvc_find_endpoint(&intf->altsetting[0], 989 ep = uvc_find_endpoint(&intf->altsetting[0],
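The uvc_video.c hunks above rework uvc_init_video() so that, instead of stopping at the first alternate setting whose packet size meets the negotiated bandwidth, it scans all alternate settings and keeps the one with the smallest sufficient packet size, so no more isochronous bandwidth than necessary is reserved. A simplified, illustrative sketch of that selection logic (function and variable names are not the driver's own):

#include <linux/usb.h>

/* Illustrative: pick the alternate setting with the smallest isochronous
 * packet size that still satisfies 'bandwidth' bytes per frame interval. */
static int pick_altsetting(struct usb_interface *intf, u8 ep_address,
			   unsigned int bandwidth, unsigned int *alt)
{
	unsigned int best_psize = 3 * 1024;	/* high-speed iso maximum */
	int found = -1;
	unsigned int i, k;

	for (i = 0; i < intf->num_altsetting; ++i) {
		struct usb_host_interface *alts = &intf->altsetting[i];
		struct usb_host_endpoint *ep = NULL;
		unsigned int psize;

		for (k = 0; k < alts->desc.bNumEndpoints; ++k)
			if (alts->endpoint[k].desc.bEndpointAddress == ep_address)
				ep = &alts->endpoint[k];
		if (ep == NULL)
			continue;

		/* wMaxPacketSize encodes packets per microframe in bits 11..12 */
		psize = le16_to_cpu(ep->desc.wMaxPacketSize);
		psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));

		if (psize >= bandwidth && psize <= best_psize) {
			best_psize = psize;
			found = i;
		}
	}

	if (found < 0)
		return -EIO;
	*alt = found;
	return 0;
}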
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 7ec9a04ced50..2337585001ea 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -365,8 +365,9 @@ enum uvc_buffer_state {
365 UVC_BUF_STATE_IDLE = 0, 365 UVC_BUF_STATE_IDLE = 0,
366 UVC_BUF_STATE_QUEUED = 1, 366 UVC_BUF_STATE_QUEUED = 1,
367 UVC_BUF_STATE_ACTIVE = 2, 367 UVC_BUF_STATE_ACTIVE = 2,
368 UVC_BUF_STATE_DONE = 3, 368 UVC_BUF_STATE_READY = 3,
369 UVC_BUF_STATE_ERROR = 4, 369 UVC_BUF_STATE_DONE = 4,
370 UVC_BUF_STATE_ERROR = 5,
370}; 371};
371 372
372struct uvc_buffer { 373struct uvc_buffer {
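Taken together, the uvc_queue.c, uvc_video.c and uvcvideo.h hunks above introduce an intermediate UVC_BUF_STATE_READY state: the decode path now marks a buffer READY when its payload is complete, and only uvc_queue_next_buffer() promotes it to DONE, after the buffer has been removed from the irq queue under irqlock, so a user-space dequeue cannot race with the completion handler. A hedged sketch of the resulting life cycle (state names are from the patch, the helper is illustrative and assumes the driver's uvcvideo.h definitions):

/*
 * Buffer life cycle after this change (illustrative):
 *
 *   IDLE -> QUEUED -> ACTIVE -> READY -> DONE -> IDLE
 *                               |        |
 *    payload complete in decode +        + set by uvc_queue_next_buffer()
 *                                          only after list_del() under
 *                                          irqlock, i.e. once the buffer
 *                                          has left the irq queue.
 */
static inline int example_buf_is_dequeueable(enum uvc_buffer_state state)
{
	/* Only DONE and ERROR buffers may be handed back to user space;
	 * a READY buffer still belongs to the completion handler. */
	return state == UVC_BUF_STATE_DONE || state == UVC_BUF_STATE_ERROR;
}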
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 85bc6a685e36..44d2037e9e56 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc)
4330 4330
4331 if (ioc->bus_type == SPI) 4331 if (ioc->bus_type == SPI)
4332 num_chain *= MPT_SCSI_CAN_QUEUE; 4332 num_chain *= MPT_SCSI_CAN_QUEUE;
4333 else if (ioc->bus_type == SAS)
4334 num_chain *= MPT_SAS_CAN_QUEUE;
4333 else 4335 else
4334 num_chain *= MPT_FC_CAN_QUEUE; 4336 num_chain *= MPT_FC_CAN_QUEUE;
4335 4337
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 57752751712b..81279b3d694c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " 1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
1797 "Command not in the active list! (sc=%p)\n", ioc->name, 1797 "Command not in the active list! (sc=%p)\n", ioc->name,
1798 SCpnt)); 1798 SCpnt));
1799 retval = 0; 1799 retval = SUCCESS;
1800 goto out; 1800 goto out;
1801 } 1801 }
1802 1802
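The mptscsih.c hunk above returns SUCCESS instead of 0 when the command to be aborted is no longer on the active list: SCSI error-handler callbacks are expected to return the SUCCESS/FAILED codes from scsi/scsi.h rather than 0 or a negative errno. A minimal sketch of that convention, with hypothetical driver-specific helpers:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical eh_abort_handler illustrating the return convention. */
static int example_eh_abort(struct scsi_cmnd *cmd)
{
	if (!command_is_still_active(cmd))	/* assumed driver-specific test */
		return SUCCESS;		/* nothing left to abort is a success */

	if (send_abort_to_firmware(cmd))	/* assumed driver-specific call */
		return FAILED;

	return SUCCESS;
}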
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index efba7021948a..3d5f40cd69df 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -40,8 +40,7 @@
40 40
41#define SG_TABLESIZE 30 41#define SG_TABLESIZE 30
42 42
43static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int, 43static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);
44 unsigned long);
45 44
46static spinlock_t i2o_config_lock; 45static spinlock_t i2o_config_lock;
47 46
@@ -751,7 +750,7 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
751 lock_kernel(); 750 lock_kernel();
752 switch (cmd) { 751 switch (cmd) {
753 case I2OGETIOPS: 752 case I2OGETIOPS:
754 ret = i2o_cfg_ioctl(NULL, file, cmd, arg); 753 ret = i2o_cfg_ioctl(file, cmd, arg);
755 break; 754 break;
756 case I2OPASSTHRU32: 755 case I2OPASSTHRU32:
757 ret = i2o_cfg_passthru32(file, cmd, arg); 756 ret = i2o_cfg_passthru32(file, cmd, arg);
@@ -984,11 +983,11 @@ out:
984/* 983/*
985 * IOCTL Handler 984 * IOCTL Handler
986 */ 985 */
987static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, 986static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
988 unsigned long arg)
989{ 987{
990 int ret; 988 int ret;
991 989
990 lock_kernel();
992 switch (cmd) { 991 switch (cmd) {
993 case I2OGETIOPS: 992 case I2OGETIOPS:
994 ret = i2o_cfg_getiops(arg); 993 ret = i2o_cfg_getiops(arg);
@@ -1044,7 +1043,7 @@ static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
1044 osm_debug("unknown ioctl called!\n"); 1043 osm_debug("unknown ioctl called!\n");
1045 ret = -EINVAL; 1044 ret = -EINVAL;
1046 } 1045 }
1047 1046 unlock_kernel();
1048 return ret; 1047 return ret;
1049} 1048}
1050 1049
@@ -1118,7 +1117,7 @@ static int cfg_release(struct inode *inode, struct file *file)
1118static const struct file_operations config_fops = { 1117static const struct file_operations config_fops = {
1119 .owner = THIS_MODULE, 1118 .owner = THIS_MODULE,
1120 .llseek = no_llseek, 1119 .llseek = no_llseek,
1121 .ioctl = i2o_cfg_ioctl, 1120 .unlocked_ioctl = i2o_cfg_ioctl,
1122#ifdef CONFIG_COMPAT 1121#ifdef CONFIG_COMPAT
1123 .compat_ioctl = i2o_cfg_compat_ioctl, 1122 .compat_ioctl = i2o_cfg_compat_ioctl,
1124#endif 1123#endif
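The i2o_config.c hunks above convert the driver from the legacy .ioctl file operation to .unlocked_ioctl: the handler drops its inode argument, returns long, and takes the big kernel lock explicitly with lock_kernel()/unlock_kernel() so the previous serialisation is preserved. A generic sketch of the same conversion for a hypothetical character driver of that era:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
				   unsigned long arg)
{
	long ret;

	lock_kernel();		/* keep the old .ioctl serialisation for now */
	switch (cmd) {
	/* ... command handling ... */
	default:
		ret = -EINVAL;
	}
	unlock_kernel();

	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,
};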
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index ca2f2c4ff05e..e09eb4870db6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_MFD_SM501) += sm501.o 5obj-$(CONFIG_MFD_SM501) += sm501.o
6obj-$(CONFIG_MFD_ASIC3) += asic3.o 6obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
7obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o 7obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o
8 8
9obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o 9obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
@@ -11,9 +11,9 @@ obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
11 11
12obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o 12obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
13 13
14obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o 14obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
15obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o 15obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
16obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o 16obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
17 17
18obj-$(CONFIG_MFD_WM8400) += wm8400-core.o 18obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
19wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o 19wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index e22128c3e9a8..95c1e6bd1729 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -80,6 +80,7 @@ struct asic3 {
80 u16 irq_bothedge[4]; 80 u16 irq_bothedge[4];
81 struct gpio_chip gpio; 81 struct gpio_chip gpio;
82 struct device *dev; 82 struct device *dev;
83 void __iomem *tmio_cnf;
83 84
84 struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)]; 85 struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)];
85}; 86};
@@ -685,8 +686,24 @@ static struct mfd_cell asic3_cell_ds1wm = {
685 .resources = ds1wm_resources, 686 .resources = ds1wm_resources,
686}; 687};
687 688
689static void asic3_mmc_pwr(struct platform_device *pdev, int state)
690{
691 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
692
693 tmio_core_mmc_pwr(asic->tmio_cnf, 1 - asic->bus_shift, state);
694}
695
696static void asic3_mmc_clk_div(struct platform_device *pdev, int state)
697{
698 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
699
700 tmio_core_mmc_clk_div(asic->tmio_cnf, 1 - asic->bus_shift, state);
701}
702
688static struct tmio_mmc_data asic3_mmc_data = { 703static struct tmio_mmc_data asic3_mmc_data = {
689 .hclk = 24576000, 704 .hclk = 24576000,
705 .set_pwr = asic3_mmc_pwr,
706 .set_clk_div = asic3_mmc_clk_div,
690}; 707};
691 708
692static struct resource asic3_mmc_resources[] = { 709static struct resource asic3_mmc_resources[] = {
@@ -696,11 +713,6 @@ static struct resource asic3_mmc_resources[] = {
696 .flags = IORESOURCE_MEM, 713 .flags = IORESOURCE_MEM,
697 }, 714 },
698 { 715 {
699 .start = ASIC3_SD_CONFIG_BASE,
700 .end = ASIC3_SD_CONFIG_BASE + 0x1ff,
701 .flags = IORESOURCE_MEM,
702 },
703 {
704 .start = 0, 716 .start = 0,
705 .end = 0, 717 .end = 0,
706 .flags = IORESOURCE_IRQ, 718 .flags = IORESOURCE_IRQ,
@@ -743,6 +755,10 @@ static int asic3_mmc_enable(struct platform_device *pdev)
743 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF), 755 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
744 ASIC3_SDHWCTRL_SDPWR, 1); 756 ASIC3_SDHWCTRL_SDPWR, 1);
745 757
758 /* ASIC3_SD_CTRL_BASE assumes 32-bit addressing, TMIO is 16-bit */
759 tmio_core_mmc_enable(asic->tmio_cnf, 1 - asic->bus_shift,
760 ASIC3_SD_CTRL_BASE >> 1);
761
746 return 0; 762 return 0;
747} 763}
748 764
@@ -797,10 +813,15 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
797 asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm); 813 asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
798 814
799 /* MMC */ 815 /* MMC */
816 asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
817 mem_sdio->start, 0x400 >> asic->bus_shift);
818 if (!asic->tmio_cnf) {
819 ret = -ENOMEM;
820 dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
821 goto out;
822 }
800 asic3_mmc_resources[0].start >>= asic->bus_shift; 823 asic3_mmc_resources[0].start >>= asic->bus_shift;
801 asic3_mmc_resources[0].end >>= asic->bus_shift; 824 asic3_mmc_resources[0].end >>= asic->bus_shift;
802 asic3_mmc_resources[1].start >>= asic->bus_shift;
803 asic3_mmc_resources[1].end >>= asic->bus_shift;
804 825
805 asic3_cell_mmc.platform_data = &asic3_cell_mmc; 826 asic3_cell_mmc.platform_data = &asic3_cell_mmc;
806 asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc); 827 asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
@@ -820,7 +841,10 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
820 841
821static void asic3_mfd_remove(struct platform_device *pdev) 842static void asic3_mfd_remove(struct platform_device *pdev)
822{ 843{
844 struct asic3 *asic = platform_get_drvdata(pdev);
845
823 mfd_remove_devices(&pdev->dev); 846 mfd_remove_devices(&pdev->dev);
847 iounmap(asic->tmio_cnf);
824} 848}
825 849
826/* Core */ 850/* Core */
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index a1ade2324ea9..735c8a4d164f 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -619,6 +619,8 @@ err_revision:
619 } 619 }
620 /* This should go away (END) */ 620 /* This should go away (END) */
621 621
622 mc13783_unlock(mc13783);
623
622 if (pdata->flags & MC13783_USE_ADC) 624 if (pdata->flags & MC13783_USE_ADC)
623 mc13783_add_subdevice(mc13783, "mc13783-adc"); 625 mc13783_add_subdevice(mc13783, "mc13783-adc");
624 626
@@ -641,8 +643,6 @@ err_revision:
641 if (pdata->flags & MC13783_USE_TOUCHSCREEN) 643 if (pdata->flags & MC13783_USE_TOUCHSCREEN)
642 mc13783_add_subdevice(mc13783, "mc13783-ts"); 644 mc13783_add_subdevice(mc13783, "mc13783-ts");
643 645
644 mc13783_unlock(mc13783);
645
646 return 0; 646 return 0;
647} 647}
648 648
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 0a255c1f1ce7..bcf4687d4af5 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -38,6 +38,19 @@ enum {
38 T7L66XB_CELL_MMC, 38 T7L66XB_CELL_MMC,
39}; 39};
40 40
41static const struct resource t7l66xb_mmc_resources[] = {
42 {
43 .start = 0x800,
44 .end = 0x9ff,
45 .flags = IORESOURCE_MEM,
46 },
47 {
48 .start = IRQ_T7L66XB_MMC,
49 .end = IRQ_T7L66XB_MMC,
50 .flags = IORESOURCE_IRQ,
51 },
52};
53
41#define SCR_REVID 0x08 /* b Revision ID */ 54#define SCR_REVID 0x08 /* b Revision ID */
42#define SCR_IMR 0x42 /* b Interrupt Mask */ 55#define SCR_IMR 0x42 /* b Interrupt Mask */
43#define SCR_DEV_CTL 0xe0 /* b Device control */ 56#define SCR_DEV_CTL 0xe0 /* b Device control */
@@ -83,6 +96,9 @@ static int t7l66xb_mmc_enable(struct platform_device *mmc)
83 96
84 spin_unlock_irqrestore(&t7l66xb->lock, flags); 97 spin_unlock_irqrestore(&t7l66xb->lock, flags);
85 98
99 tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
100 t7l66xb_mmc_resources[0].start & 0xfffe);
101
86 return 0; 102 return 0;
87} 103}
88 104
@@ -106,28 +122,28 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
106 return 0; 122 return 0;
107} 123}
108 124
125static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state)
126{
127 struct platform_device *dev = to_platform_device(mmc->dev.parent);
128 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
129
130 tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
131}
132
133static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state)
134{
135 struct platform_device *dev = to_platform_device(mmc->dev.parent);
136 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
137
138 tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
139}
140
109/*--------------------------------------------------------------------------*/ 141/*--------------------------------------------------------------------------*/
110 142
111static struct tmio_mmc_data t7166xb_mmc_data = { 143static struct tmio_mmc_data t7166xb_mmc_data = {
112 .hclk = 24000000, 144 .hclk = 24000000,
113}; 145 .set_pwr = t7l66xb_mmc_pwr,
114 146 .set_clk_div = t7l66xb_mmc_clk_div,
115static const struct resource t7l66xb_mmc_resources[] = {
116 {
117 .start = 0x800,
118 .end = 0x9ff,
119 .flags = IORESOURCE_MEM,
120 },
121 {
122 .start = 0x200,
123 .end = 0x2ff,
124 .flags = IORESOURCE_MEM,
125 },
126 {
127 .start = IRQ_T7L66XB_MMC,
128 .end = IRQ_T7L66XB_MMC,
129 .flags = IORESOURCE_IRQ,
130 },
131}; 147};
132 148
133static const struct resource t7l66xb_nand_resources[] = { 149static const struct resource t7l66xb_nand_resources[] = {
@@ -282,6 +298,9 @@ static int t7l66xb_resume(struct platform_device *dev)
282 if (pdata && pdata->resume) 298 if (pdata && pdata->resume)
283 pdata->resume(dev); 299 pdata->resume(dev);
284 300
301 tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
302 t7l66xb_mmc_resources[0].start & 0xfffe);
303
285 return 0; 304 return 0;
286} 305}
287#else 306#else
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 3280ab33f88a..5c7f04343d5c 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -22,28 +22,52 @@ enum {
22 TC6387XB_CELL_MMC, 22 TC6387XB_CELL_MMC,
23}; 23};
24 24
25struct tc6387xb {
26 void __iomem *scr;
27 struct clk *clk32k;
28 struct resource rscr;
29};
30
31static struct resource tc6387xb_mmc_resources[] = {
32 {
33 .start = 0x800,
34 .end = 0x9ff,
35 .flags = IORESOURCE_MEM,
36 },
37 {
38 .start = 0,
39 .end = 0,
40 .flags = IORESOURCE_IRQ,
41 },
42};
43
44/*--------------------------------------------------------------------------*/
45
25#ifdef CONFIG_PM 46#ifdef CONFIG_PM
26static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state) 47static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
27{ 48{
28 struct clk *clk32k = platform_get_drvdata(dev); 49 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
29 struct tc6387xb_platform_data *pdata = dev->dev.platform_data; 50 struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
30 51
31 if (pdata && pdata->suspend) 52 if (pdata && pdata->suspend)
32 pdata->suspend(dev); 53 pdata->suspend(dev);
33 clk_disable(clk32k); 54 clk_disable(tc6387xb->clk32k);
34 55
35 return 0; 56 return 0;
36} 57}
37 58
38static int tc6387xb_resume(struct platform_device *dev) 59static int tc6387xb_resume(struct platform_device *dev)
39{ 60{
40 struct clk *clk32k = platform_get_drvdata(dev); 61 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
41 struct tc6387xb_platform_data *pdata = dev->dev.platform_data; 62 struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
42 63
43 clk_enable(clk32k); 64 clk_enable(tc6387xb->clk32k);
44 if (pdata && pdata->resume) 65 if (pdata && pdata->resume)
45 pdata->resume(dev); 66 pdata->resume(dev);
46 67
68 tmio_core_mmc_resume(tc6387xb->scr + 0x200, 0,
69 tc6387xb_mmc_resources[0].start & 0xfffe);
70
47 return 0; 71 return 0;
48} 72}
49#else 73#else
@@ -53,12 +77,32 @@ static int tc6387xb_resume(struct platform_device *dev)
53 77
54/*--------------------------------------------------------------------------*/ 78/*--------------------------------------------------------------------------*/
55 79
80static void tc6387xb_mmc_pwr(struct platform_device *mmc, int state)
81{
82 struct platform_device *dev = to_platform_device(mmc->dev.parent);
83 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
84
85 tmio_core_mmc_pwr(tc6387xb->scr + 0x200, 0, state);
86}
87
88static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
89{
90 struct platform_device *dev = to_platform_device(mmc->dev.parent);
91 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
92
93 tmio_core_mmc_clk_div(tc6387xb->scr + 0x200, 0, state);
94}
95
96
56static int tc6387xb_mmc_enable(struct platform_device *mmc) 97static int tc6387xb_mmc_enable(struct platform_device *mmc)
57{ 98{
58 struct platform_device *dev = to_platform_device(mmc->dev.parent); 99 struct platform_device *dev = to_platform_device(mmc->dev.parent);
59 struct clk *clk32k = platform_get_drvdata(dev); 100 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
60 101
61 clk_enable(clk32k); 102 clk_enable(tc6387xb->clk32k);
103
104 tmio_core_mmc_enable(tc6387xb->scr + 0x200, 0,
105 tc6387xb_mmc_resources[0].start & 0xfffe);
62 106
63 return 0; 107 return 0;
64} 108}
@@ -66,36 +110,20 @@ static int tc6387xb_mmc_enable(struct platform_device *mmc)
66static int tc6387xb_mmc_disable(struct platform_device *mmc) 110static int tc6387xb_mmc_disable(struct platform_device *mmc)
67{ 111{
68 struct platform_device *dev = to_platform_device(mmc->dev.parent); 112 struct platform_device *dev = to_platform_device(mmc->dev.parent);
69 struct clk *clk32k = platform_get_drvdata(dev); 113 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
70 114
71 clk_disable(clk32k); 115 clk_disable(tc6387xb->clk32k);
72 116
73 return 0; 117 return 0;
74} 118}
75 119
76/*--------------------------------------------------------------------------*/
77
78static struct tmio_mmc_data tc6387xb_mmc_data = { 120static struct tmio_mmc_data tc6387xb_mmc_data = {
79 .hclk = 24000000, 121 .hclk = 24000000,
122 .set_pwr = tc6387xb_mmc_pwr,
123 .set_clk_div = tc6387xb_mmc_clk_div,
80}; 124};
81 125
82static struct resource tc6387xb_mmc_resources[] = { 126/*--------------------------------------------------------------------------*/
83 {
84 .start = 0x800,
85 .end = 0x9ff,
86 .flags = IORESOURCE_MEM,
87 },
88 {
89 .start = 0x200,
90 .end = 0x2ff,
91 .flags = IORESOURCE_MEM,
92 },
93 {
94 .start = 0,
95 .end = 0,
96 .flags = IORESOURCE_IRQ,
97 },
98};
99 127
100static struct mfd_cell tc6387xb_cells[] = { 128static struct mfd_cell tc6387xb_cells[] = {
101 [TC6387XB_CELL_MMC] = { 129 [TC6387XB_CELL_MMC] = {
@@ -111,8 +139,9 @@ static struct mfd_cell tc6387xb_cells[] = {
111static int tc6387xb_probe(struct platform_device *dev) 139static int tc6387xb_probe(struct platform_device *dev)
112{ 140{
113 struct tc6387xb_platform_data *pdata = dev->dev.platform_data; 141 struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
114 struct resource *iomem; 142 struct resource *iomem, *rscr;
115 struct clk *clk32k; 143 struct clk *clk32k;
144 struct tc6387xb *tc6387xb;
116 int irq, ret; 145 int irq, ret;
117 146
118 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); 147 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -120,18 +149,40 @@ static int tc6387xb_probe(struct platform_device *dev)
120 return -EINVAL; 149 return -EINVAL;
121 } 150 }
122 151
152 tc6387xb = kzalloc(sizeof *tc6387xb, GFP_KERNEL);
153 if (!tc6387xb)
154 return -ENOMEM;
155
123 ret = platform_get_irq(dev, 0); 156 ret = platform_get_irq(dev, 0);
124 if (ret >= 0) 157 if (ret >= 0)
125 irq = ret; 158 irq = ret;
126 else 159 else
127 goto err_resource; 160 goto err_no_irq;
128 161
129 clk32k = clk_get(&dev->dev, "CLK_CK32K"); 162 clk32k = clk_get(&dev->dev, "CLK_CK32K");
130 if (IS_ERR(clk32k)) { 163 if (IS_ERR(clk32k)) {
131 ret = PTR_ERR(clk32k); 164 ret = PTR_ERR(clk32k);
165 goto err_no_clk;
166 }
167
168 rscr = &tc6387xb->rscr;
169 rscr->name = "tc6387xb-core";
170 rscr->start = iomem->start;
171 rscr->end = iomem->start + 0xff;
172 rscr->flags = IORESOURCE_MEM;
173
174 ret = request_resource(iomem, rscr);
175 if (ret)
132 goto err_resource; 176 goto err_resource;
177
178 tc6387xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
179 if (!tc6387xb->scr) {
180 ret = -ENOMEM;
181 goto err_ioremap;
133 } 182 }
134 platform_set_drvdata(dev, clk32k); 183
184 tc6387xb->clk32k = clk32k;
185 platform_set_drvdata(dev, tc6387xb);
135 186
136 if (pdata && pdata->enable) 187 if (pdata && pdata->enable)
137 pdata->enable(dev); 188 pdata->enable(dev);
@@ -149,8 +200,13 @@ static int tc6387xb_probe(struct platform_device *dev)
149 if (!ret) 200 if (!ret)
150 return 0; 201 return 0;
151 202
152 clk_put(clk32k); 203err_ioremap:
204 release_resource(&tc6387xb->rscr);
153err_resource: 205err_resource:
206 clk_put(clk32k);
207err_no_clk:
208err_no_irq:
209 kfree(tc6387xb);
154 return ret; 210 return ret;
155} 211}
156 212
@@ -195,3 +251,4 @@ MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
195MODULE_LICENSE("GPL v2"); 251MODULE_LICENSE("GPL v2");
196MODULE_AUTHOR("Ian Molton"); 252MODULE_AUTHOR("Ian Molton");
197MODULE_ALIAS("platform:tc6387xb"); 253MODULE_ALIAS("platform:tc6387xb");
254
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1429a7341a9a..4bc5a08a2b09 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -136,10 +136,6 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
136 return 0; 136 return 0;
137} 137}
138 138
139static struct tmio_mmc_data tc6393xb_mmc_data = {
140 .hclk = 24000000,
141};
142
143static struct resource __devinitdata tc6393xb_nand_resources[] = { 139static struct resource __devinitdata tc6393xb_nand_resources[] = {
144 { 140 {
145 .start = 0x1000, 141 .start = 0x1000,
@@ -165,11 +161,6 @@ static struct resource __devinitdata tc6393xb_mmc_resources[] = {
165 .flags = IORESOURCE_MEM, 161 .flags = IORESOURCE_MEM,
166 }, 162 },
167 { 163 {
168 .start = 0x200,
169 .end = 0x2ff,
170 .flags = IORESOURCE_MEM,
171 },
172 {
173 .start = IRQ_TC6393_MMC, 164 .start = IRQ_TC6393_MMC,
174 .end = IRQ_TC6393_MMC, 165 .end = IRQ_TC6393_MMC,
175 .flags = IORESOURCE_IRQ, 166 .flags = IORESOURCE_IRQ,
@@ -346,6 +337,50 @@ int tc6393xb_lcd_mode(struct platform_device *fb,
346} 337}
347EXPORT_SYMBOL(tc6393xb_lcd_mode); 338EXPORT_SYMBOL(tc6393xb_lcd_mode);
348 339
340static int tc6393xb_mmc_enable(struct platform_device *mmc)
341{
342 struct platform_device *dev = to_platform_device(mmc->dev.parent);
343 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
344
345 tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
346 tc6393xb_mmc_resources[0].start & 0xfffe);
347
348 return 0;
349}
350
351static int tc6393xb_mmc_resume(struct platform_device *mmc)
352{
353 struct platform_device *dev = to_platform_device(mmc->dev.parent);
354 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
355
356 tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
357 tc6393xb_mmc_resources[0].start & 0xfffe);
358
359 return 0;
360}
361
362static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
363{
364 struct platform_device *dev = to_platform_device(mmc->dev.parent);
365 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
366
367 tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
368}
369
370static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
371{
372 struct platform_device *dev = to_platform_device(mmc->dev.parent);
373 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
374
375 tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
376}
377
378static struct tmio_mmc_data tc6393xb_mmc_data = {
379 .hclk = 24000000,
380 .set_pwr = tc6393xb_mmc_pwr,
381 .set_clk_div = tc6393xb_mmc_clk_div,
382};
383
349static struct mfd_cell __devinitdata tc6393xb_cells[] = { 384static struct mfd_cell __devinitdata tc6393xb_cells[] = {
350 [TC6393XB_CELL_NAND] = { 385 [TC6393XB_CELL_NAND] = {
351 .name = "tmio-nand", 386 .name = "tmio-nand",
@@ -355,6 +390,8 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
355 }, 390 },
356 [TC6393XB_CELL_MMC] = { 391 [TC6393XB_CELL_MMC] = {
357 .name = "tmio-mmc", 392 .name = "tmio-mmc",
393 .enable = tc6393xb_mmc_enable,
394 .resume = tc6393xb_mmc_resume,
358 .driver_data = &tc6393xb_mmc_data, 395 .driver_data = &tc6393xb_mmc_data,
359 .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources), 396 .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
360 .resources = tc6393xb_mmc_resources, 397 .resources = tc6393xb_mmc_resources,
@@ -836,3 +873,4 @@ MODULE_LICENSE("GPL v2");
836MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer"); 873MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
837MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller"); 874MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
838MODULE_ALIAS("platform:tc6393xb"); 875MODULE_ALIAS("platform:tc6393xb");
876
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
new file mode 100644
index 000000000000..eddc19ae464b
--- /dev/null
+++ b/drivers/mfd/tmio_core.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright(c) 2009 Ian Molton <spyro@f2s.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/mfd/tmio.h>
10
11int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
12{
13 /* Enable the MMC/SD Control registers */
14 sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
15 sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
16
17 /* Disable SD power during suspend */
18 sd_config_write8(cnf, shift, CNF_PWR_CTL_3, 0x01);
19
20 /* The below is required but why? FIXME */
21 sd_config_write8(cnf, shift, CNF_STOP_CLK_CTL, 0x1f);
22
23 /* Power down SD bus */
24 sd_config_write8(cnf, shift, CNF_PWR_CTL_2, 0x00);
25
26 return 0;
27}
28EXPORT_SYMBOL(tmio_core_mmc_enable);
29
30int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base)
31{
32
33 /* Enable the MMC/SD Control registers */
34 sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
35 sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
36
37 return 0;
38}
39EXPORT_SYMBOL(tmio_core_mmc_resume);
40
41void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state)
42{
43 sd_config_write8(cnf, shift, CNF_PWR_CTL_2, state ? 0x02 : 0x00);
44}
45EXPORT_SYMBOL(tmio_core_mmc_pwr);
46
47void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state)
48{
49 sd_config_write8(cnf, shift, CNF_SD_CLK_MODE, state ? 1 : 0);
50}
51EXPORT_SYMBOL(tmio_core_mmc_clk_div);
52
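The new tmio_core.c above factors the TMIO configuration ("CNF") register pokes out of tmio_mmc and into a shared core, so each MFD parent (asic3, t7l66xb, tc6387xb, tc6393xb) maps its own config window and hands power/clock callbacks to the MMC cell through tmio_mmc_data. A hedged sketch of how a parent driver wires this up after the change, closely following the t7l66xb hunks; the chip struct and offsets here are illustrative:

#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>

struct example_chip {			/* illustrative parent-driver state */
	void __iomem *scr;		/* mapped register window */
};

static int example_mmc_enable(struct platform_device *mmc)
{
	struct platform_device *dev = to_platform_device(mmc->dev.parent);
	struct example_chip *chip = platform_get_drvdata(dev);

	/* The CTL block sits at 0x800 in the cell's resources; the base
	 * written to CNF_CTL_BASE must be even, hence the masking. */
	return tmio_core_mmc_enable(chip->scr + 0x200, 0, 0x800 & 0xfffe);
}

static void example_mmc_pwr(struct platform_device *mmc, int state)
{
	struct platform_device *dev = to_platform_device(mmc->dev.parent);
	struct example_chip *chip = platform_get_drvdata(dev);

	tmio_core_mmc_pwr(chip->scr + 0x200, 0, state);
}

static void example_mmc_clk_div(struct platform_device *mmc, int state)
{
	struct platform_device *dev = to_platform_device(mmc->dev.parent);
	struct example_chip *chip = platform_get_drvdata(dev);

	tmio_core_mmc_clk_div(chip->scr + 0x200, 0, state);
}

static struct tmio_mmc_data example_mmc_data = {
	.hclk		= 24000000,
	.set_pwr	= example_mmc_pwr,
	.set_clk_div	= example_mmc_clk_div,
};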
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 20d29bafc9f5..9df9a5ad38f9 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -568,12 +568,12 @@ static void twl4030_sih_do_edge(struct work_struct *work)
568 568
569 bytes[byte] &= ~(0x03 << off); 569 bytes[byte] &= ~(0x03 << off);
570 570
571 spin_lock_irq(&d->lock); 571 raw_spin_lock_irq(&d->lock);
572 if (d->status & IRQ_TYPE_EDGE_RISING) 572 if (d->status & IRQ_TYPE_EDGE_RISING)
573 bytes[byte] |= BIT(off + 1); 573 bytes[byte] |= BIT(off + 1);
574 if (d->status & IRQ_TYPE_EDGE_FALLING) 574 if (d->status & IRQ_TYPE_EDGE_FALLING)
575 bytes[byte] |= BIT(off + 0); 575 bytes[byte] |= BIT(off + 0);
576 spin_unlock_irq(&d->lock); 576 raw_spin_unlock_irq(&d->lock);
577 577
578 edge_change &= ~BIT(i); 578 edge_change &= ~BIT(i);
579 } 579 }
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 8485a7018060..9a970bd68775 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -134,8 +134,7 @@ static inline int is_reg_locked(struct wm8350 *wm8350, u8 reg)
134 wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY) 134 wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY)
135 return 0; 135 return 0;
136 136
137 if ((reg == WM8350_GPIO_CONFIGURATION_I_O) || 137 if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
138 (reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
139 reg <= WM8350_GPIO_FUNCTION_SELECT_4) || 138 reg <= WM8350_GPIO_FUNCTION_SELECT_4) ||
140 (reg >= WM8350_BATTERY_CHARGER_CONTROL_1 && 139 (reg >= WM8350_BATTERY_CHARGER_CONTROL_1 &&
141 reg <= WM8350_BATTERY_CHARGER_CONTROL_3)) 140 reg <= WM8350_BATTERY_CHARGER_CONTROL_3))
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index c8df547c4747..9025f29e2707 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -434,7 +434,7 @@ int wm8350_register_irq(struct wm8350 *wm8350, int irq,
434 irq_handler_t handler, unsigned long flags, 434 irq_handler_t handler, unsigned long flags,
435 const char *name, void *data) 435 const char *name, void *data)
436{ 436{
437 if (irq < 0 || irq > WM8350_NUM_IRQ || !handler) 437 if (irq < 0 || irq >= WM8350_NUM_IRQ || !handler)
438 return -EINVAL; 438 return -EINVAL;
439 439
440 if (wm8350->irq[irq].handler) 440 if (wm8350->irq[irq].handler)
@@ -453,7 +453,7 @@ EXPORT_SYMBOL_GPL(wm8350_register_irq);
453 453
454int wm8350_free_irq(struct wm8350 *wm8350, int irq) 454int wm8350_free_irq(struct wm8350 *wm8350, int irq)
455{ 455{
456 if (irq < 0 || irq > WM8350_NUM_IRQ) 456 if (irq < 0 || irq >= WM8350_NUM_IRQ)
457 return -EINVAL; 457 return -EINVAL;
458 458
459 wm8350_mask_irq(wm8350, irq); 459 wm8350_mask_irq(wm8350, irq);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 85f0e8cd875b..1f552c6e7579 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -85,7 +85,14 @@ static void mmc_blk_put(struct mmc_blk_data *md)
85 mutex_lock(&open_lock); 85 mutex_lock(&open_lock);
86 md->usage--; 86 md->usage--;
87 if (md->usage == 0) { 87 if (md->usage == 0) {
88 int devmaj = MAJOR(disk_devt(md->disk));
88 int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; 89 int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
90
91 if (!devmaj)
92 devidx = md->disk->first_minor >> MMC_SHIFT;
93
94 blk_cleanup_queue(md->queue.queue);
95
89 __clear_bit(devidx, dev_use); 96 __clear_bit(devidx, dev_use);
90 97
91 put_disk(md->disk); 98 put_disk(md->disk);
@@ -613,6 +620,7 @@ static int mmc_blk_probe(struct mmc_card *card)
613 return 0; 620 return 0;
614 621
615 out: 622 out:
623 mmc_cleanup_queue(&md->queue);
616 mmc_blk_put(md); 624 mmc_blk_put(md);
617 625
618 return err; 626 return err;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b9f1e84897cc..e7f8027165e6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
74 } 74 }
75 75
76 mrq->cmd->arg = dev_addr; 76 mrq->cmd->arg = dev_addr;
77 if (!mmc_card_blockaddr(test->card))
78 mrq->cmd->arg <<= 9;
79
77 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 80 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
78 81
79 if (blocks == 1) 82 if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
190 } 193 }
191 194
192 for (i = 0;i < BUFFER_SIZE / 512;i++) { 195 for (i = 0;i < BUFFER_SIZE / 512;i++) {
193 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 196 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
194 if (ret) 197 if (ret)
195 return ret; 198 return ret;
196 } 199 }
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
219 memset(test->buffer, 0, 512); 222 memset(test->buffer, 0, 512);
220 223
221 for (i = 0;i < BUFFER_SIZE / 512;i++) { 224 for (i = 0;i < BUFFER_SIZE / 512;i++) {
222 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 225 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
223 if (ret) 226 if (ret)
224 return ret; 227 return ret;
225 } 228 }
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
426 for (i = 0;i < sectors;i++) { 429 for (i = 0;i < sectors;i++) {
427 ret = mmc_test_buffer_transfer(test, 430 ret = mmc_test_buffer_transfer(test,
428 test->buffer + i * 512, 431 test->buffer + i * 512,
429 dev_addr + i * 512, 512, 0); 432 dev_addr + i, 512, 0);
430 if (ret) 433 if (ret)
431 return ret; 434 return ret;
432 } 435 }
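The mmc_test.c hunks above switch mmc_test_buffer_transfer() callers from byte offsets to block numbers, and shift the command argument left by 9 only for cards that are not block-addressed, since high-capacity cards already take a block address in the data commands. A small hedged helper showing the address conversion the test now relies on (the helper name is illustrative):

#include <linux/types.h>
#include <linux/mmc/card.h>

/* Illustrative: convert a block number to the argument the card expects.
 * Byte-addressed (standard-capacity) cards want a byte offset,
 * block-addressed (high-capacity) cards want the block number itself. */
static u32 example_data_cmd_arg(struct mmc_card *card, unsigned int block)
{
	if (!mmc_card_blockaddr(card))
		return block << 9;	/* 512-byte blocks -> byte address */
	return block;
}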
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 49e582356c65..c5a7a855f4b1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -90,9 +90,10 @@ static void mmc_request(struct request_queue *q)
90 struct request *req; 90 struct request *req;
91 91
92 if (!mq) { 92 if (!mq) {
93 printk(KERN_ERR "MMC: killing requests for dead queue\n"); 93 while ((req = blk_fetch_request(q)) != NULL) {
94 while ((req = blk_fetch_request(q)) != NULL) 94 req->cmd_flags |= REQ_QUIET;
95 __blk_end_request_all(req, -EIO); 95 __blk_end_request_all(req, -EIO);
96 }
96 return; 97 return;
97 } 98 }
98 99
@@ -223,17 +224,18 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
223 struct request_queue *q = mq->queue; 224 struct request_queue *q = mq->queue;
224 unsigned long flags; 225 unsigned long flags;
225 226
226 /* Mark that we should start throwing out stragglers */
227 spin_lock_irqsave(q->queue_lock, flags);
228 q->queuedata = NULL;
229 spin_unlock_irqrestore(q->queue_lock, flags);
230
231 /* Make sure the queue isn't suspended, as that will deadlock */ 227 /* Make sure the queue isn't suspended, as that will deadlock */
232 mmc_queue_resume(mq); 228 mmc_queue_resume(mq);
233 229
234 /* Then terminate our worker thread */ 230 /* Then terminate our worker thread */
235 kthread_stop(mq->thread); 231 kthread_stop(mq->thread);
236 232
233 /* Empty the queue */
234 spin_lock_irqsave(q->queue_lock, flags);
235 q->queuedata = NULL;
236 blk_start_queue(q);
237 spin_unlock_irqrestore(q->queue_lock, flags);
238
237 if (mq->bounce_sg) 239 if (mq->bounce_sg)
238 kfree(mq->bounce_sg); 240 kfree(mq->bounce_sg);
239 mq->bounce_sg = NULL; 241 mq->bounce_sg = NULL;
@@ -245,8 +247,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
245 kfree(mq->bounce_buf); 247 kfree(mq->bounce_buf);
246 mq->bounce_buf = NULL; 248 mq->bounce_buf = NULL;
247 249
248 blk_cleanup_queue(mq->queue);
249
250 mq->card = NULL; 250 mq->card = NULL;
251} 251}
252EXPORT_SYMBOL(mmc_cleanup_queue); 252EXPORT_SYMBOL(mmc_cleanup_queue);
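The queue.c and block.c hunks above reorder MMC queue teardown: blk_cleanup_queue() moves out of mmc_cleanup_queue() into mmc_blk_put(), so it only runs when the last user drops the gendisk, and mmc_cleanup_queue() now clears queuedata and restarts the queue only after the worker thread has stopped, letting the quiet mmc_request() path fail any straggling requests with -EIO instead of racing the thread against a NULL queuedata. A condensed sketch of the resulting teardown order (bodies trimmed, driver-private types from queue.h assumed; not the literal driver code):

void example_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	mmc_queue_resume(mq);		/* a suspended queue would deadlock */
	kthread_stop(mq->thread);	/* worker gone, safe to drop queuedata */

	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;		/* mmc_request() now rejects requests */
	blk_start_queue(q);		/* flush anything still queued */
	spin_unlock_irqrestore(q->queue_lock, flags);

	mq->card = NULL;
	/* blk_cleanup_queue() is deferred to the final mmc_blk_put() */
}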
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c11189446a1f..0eac6c814904 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -207,7 +207,7 @@ static int mmc_read_ext_csd(struct mmc_card *card)
207 } 207 }
208 208
209 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 209 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
210 if (card->ext_csd.rev > 3) { 210 if (card->ext_csd.rev > 5) {
211 printk(KERN_ERR "%s: unrecognised EXT_CSD structure " 211 printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
212 "version %d\n", mmc_hostname(card->host), 212 "version %d\n", mmc_hostname(card->host),
213 card->ext_csd.rev); 213 card->ext_csd.rev);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 7cccc8523747..e22c3fa3516a 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -46,7 +46,9 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
46 clk |= 0x100; 46 clk |= 0x100;
47 } 47 }
48 48
49 sd_config_write8(host, CNF_SD_CLK_MODE, clk >> 22); 49 if (host->set_clk_div)
50 host->set_clk_div(host->pdev, (clk>>22) & 1);
51
50 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); 52 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
51} 53}
52 54
@@ -427,12 +429,13 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
427 /* Power sequence - OFF -> ON -> UP */ 429 /* Power sequence - OFF -> ON -> UP */
428 switch (ios->power_mode) { 430 switch (ios->power_mode) {
429 case MMC_POWER_OFF: /* power down SD bus */ 431 case MMC_POWER_OFF: /* power down SD bus */
430 sd_config_write8(host, CNF_PWR_CTL_2, 0x00); 432 if (host->set_pwr)
433 host->set_pwr(host->pdev, 0);
431 tmio_mmc_clk_stop(host); 434 tmio_mmc_clk_stop(host);
432 break; 435 break;
433 case MMC_POWER_ON: /* power up SD bus */ 436 case MMC_POWER_ON: /* power up SD bus */
434 437 if (host->set_pwr)
435 sd_config_write8(host, CNF_PWR_CTL_2, 0x02); 438 host->set_pwr(host->pdev, 1);
436 break; 439 break;
437 case MMC_POWER_UP: /* start bus clock */ 440 case MMC_POWER_UP: /* start bus clock */
438 tmio_mmc_clk_start(host); 441 tmio_mmc_clk_start(host);
@@ -485,21 +488,15 @@ static int tmio_mmc_resume(struct platform_device *dev)
485{ 488{
486 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 489 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
487 struct mmc_host *mmc = platform_get_drvdata(dev); 490 struct mmc_host *mmc = platform_get_drvdata(dev);
488 struct tmio_mmc_host *host = mmc_priv(mmc);
489 int ret = 0; 491 int ret = 0;
490 492
491 /* Tell the MFD core we are ready to be enabled */ 493 /* Tell the MFD core we are ready to be enabled */
492 if (cell->enable) { 494 if (cell->resume) {
493 ret = cell->enable(dev); 495 ret = cell->resume(dev);
494 if (ret) 496 if (ret)
495 goto out; 497 goto out;
496 } 498 }
497 499
498 /* Enable the MMC/SD Control registers */
499 sd_config_write16(host, CNF_CMD, SDCREN);
500 sd_config_write32(host, CNF_CTL_BASE,
501 (dev->resource[0].start >> host->bus_shift) & 0xfffe);
502
503 mmc_resume_host(mmc); 500 mmc_resume_host(mmc);
504 501
505out: 502out:
@@ -514,17 +511,16 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
514{ 511{
515 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 512 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
516 struct tmio_mmc_data *pdata; 513 struct tmio_mmc_data *pdata;
517 struct resource *res_ctl, *res_cnf; 514 struct resource *res_ctl;
518 struct tmio_mmc_host *host; 515 struct tmio_mmc_host *host;
519 struct mmc_host *mmc; 516 struct mmc_host *mmc;
520 int ret = -EINVAL; 517 int ret = -EINVAL;
521 518
522 if (dev->num_resources != 3) 519 if (dev->num_resources != 2)
523 goto out; 520 goto out;
524 521
525 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); 522 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
526 res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1); 523 if (!res_ctl)
527 if (!res_ctl || !res_cnf)
528 goto out; 524 goto out;
529 525
530 pdata = cell->driver_data; 526 pdata = cell->driver_data;
@@ -539,8 +535,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
539 535
540 host = mmc_priv(mmc); 536 host = mmc_priv(mmc);
541 host->mmc = mmc; 537 host->mmc = mmc;
538 host->pdev = dev;
542 platform_set_drvdata(dev, mmc); 539 platform_set_drvdata(dev, mmc);
543 540
541 host->set_pwr = pdata->set_pwr;
542 host->set_clk_div = pdata->set_clk_div;
543
544 /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ 544 /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
545 host->bus_shift = resource_size(res_ctl) >> 10; 545 host->bus_shift = resource_size(res_ctl) >> 10;
546 546
@@ -548,10 +548,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
548 if (!host->ctl) 548 if (!host->ctl)
549 goto host_free; 549 goto host_free;
550 550
551 host->cnf = ioremap(res_cnf->start, resource_size(res_cnf));
552 if (!host->cnf)
553 goto unmap_ctl;
554
555 mmc->ops = &tmio_mmc_ops; 551 mmc->ops = &tmio_mmc_ops;
556 mmc->caps = MMC_CAP_4_BIT_DATA; 552 mmc->caps = MMC_CAP_4_BIT_DATA;
557 mmc->f_max = pdata->hclk; 553 mmc->f_max = pdata->hclk;
@@ -562,23 +558,9 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
562 if (cell->enable) { 558 if (cell->enable) {
563 ret = cell->enable(dev); 559 ret = cell->enable(dev);
564 if (ret) 560 if (ret)
565 goto unmap_cnf; 561 goto unmap_ctl;
566 } 562 }
567 563
568 /* Enable the MMC/SD Control registers */
569 sd_config_write16(host, CNF_CMD, SDCREN);
570 sd_config_write32(host, CNF_CTL_BASE,
571 (dev->resource[0].start >> host->bus_shift) & 0xfffe);
572
573 /* Disable SD power during suspend */
574 sd_config_write8(host, CNF_PWR_CTL_3, 0x01);
575
576 /* The below is required but why? FIXME */
577 sd_config_write8(host, CNF_STOP_CLK_CTL, 0x1f);
578
579 /* Power down SD bus*/
580 sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
581
582 tmio_mmc_clk_stop(host); 564 tmio_mmc_clk_stop(host);
583 reset(host); 565 reset(host);
584 566
@@ -586,14 +568,14 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
586 if (ret >= 0) 568 if (ret >= 0)
587 host->irq = ret; 569 host->irq = ret;
588 else 570 else
589 goto unmap_cnf; 571 goto unmap_ctl;
590 572
591 disable_mmc_irqs(host, TMIO_MASK_ALL); 573 disable_mmc_irqs(host, TMIO_MASK_ALL);
592 574
593 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | 575 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
594 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); 576 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
595 if (ret) 577 if (ret)
596 goto unmap_cnf; 578 goto unmap_ctl;
597 579
598 mmc_add_host(mmc); 580 mmc_add_host(mmc);
599 581
@@ -605,8 +587,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
605 587
606 return 0; 588 return 0;
607 589
608unmap_cnf:
609 iounmap(host->cnf);
610unmap_ctl: 590unmap_ctl:
611 iounmap(host->ctl); 591 iounmap(host->ctl);
612host_free: 592host_free:
@@ -626,7 +606,6 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
626 mmc_remove_host(mmc); 606 mmc_remove_host(mmc);
627 free_irq(host->irq, host); 607 free_irq(host->irq, host);
628 iounmap(host->ctl); 608 iounmap(host->ctl);
629 iounmap(host->cnf);
630 mmc_free_host(mmc); 609 mmc_free_host(mmc);
631 } 610 }
632 611
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9fa998594974..692dc23363b9 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -11,26 +11,6 @@
11 11
12#include <linux/highmem.h> 12#include <linux/highmem.h>
13 13
14#define CNF_CMD 0x04
15#define CNF_CTL_BASE 0x10
16#define CNF_INT_PIN 0x3d
17#define CNF_STOP_CLK_CTL 0x40
18#define CNF_GCLK_CTL 0x41
19#define CNF_SD_CLK_MODE 0x42
20#define CNF_PIN_STATUS 0x44
21#define CNF_PWR_CTL_1 0x48
22#define CNF_PWR_CTL_2 0x49
23#define CNF_PWR_CTL_3 0x4a
24#define CNF_CARD_DETECT_MODE 0x4c
25#define CNF_SD_SLOT 0x50
26#define CNF_EXT_GCLK_CTL_1 0xf0
27#define CNF_EXT_GCLK_CTL_2 0xf1
28#define CNF_EXT_GCLK_CTL_3 0xf9
29#define CNF_SD_LED_EN_1 0xfa
30#define CNF_SD_LED_EN_2 0xfe
31
32#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
33
34#define CTL_SD_CMD 0x00 14#define CTL_SD_CMD 0x00
35#define CTL_ARG_REG 0x04 15#define CTL_ARG_REG 0x04
36#define CTL_STOP_INTERNAL_ACTION 0x08 16#define CTL_STOP_INTERNAL_ACTION 0x08
@@ -110,7 +90,6 @@
110 90
111 91
112struct tmio_mmc_host { 92struct tmio_mmc_host {
113 void __iomem *cnf;
114 void __iomem *ctl; 93 void __iomem *ctl;
115 unsigned long bus_shift; 94 unsigned long bus_shift;
116 struct mmc_command *cmd; 95 struct mmc_command *cmd;
@@ -119,10 +98,16 @@ struct tmio_mmc_host {
119 struct mmc_host *mmc; 98 struct mmc_host *mmc;
120 int irq; 99 int irq;
121 100
101 /* Callbacks for clock / power control */
102 void (*set_pwr)(struct platform_device *host, int state);
103 void (*set_clk_div)(struct platform_device *host, int state);
104
122 /* pio related stuff */ 105 /* pio related stuff */
123 struct scatterlist *sg_ptr; 106 struct scatterlist *sg_ptr;
124 unsigned int sg_len; 107 unsigned int sg_len;
125 unsigned int sg_off; 108 unsigned int sg_off;
109
110 struct platform_device *pdev;
126}; 111};
127 112
128#include <linux/io.h> 113#include <linux/io.h>
@@ -163,25 +148,6 @@ static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
163 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 148 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
164} 149}
165 150
166static inline void sd_config_write8(struct tmio_mmc_host *host, int addr,
167 u8 val)
168{
169 writeb(val, host->cnf + (addr << host->bus_shift));
170}
171
172static inline void sd_config_write16(struct tmio_mmc_host *host, int addr,
173 u16 val)
174{
175 writew(val, host->cnf + (addr << host->bus_shift));
176}
177
178static inline void sd_config_write32(struct tmio_mmc_host *host, int addr,
179 u32 val)
180{
181 writew(val, host->cnf + (addr << host->bus_shift));
182 writew(val >> 16, host->cnf + ((addr + 2) << host->bus_shift));
183}
184
185#include <linux/scatterlist.h> 151#include <linux/scatterlist.h>
186#include <linux/blkdev.h> 152#include <linux/blkdev.h>
187 153
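The probe path above now copies set_pwr and set_clk_div from struct tmio_mmc_data into the host; the include/linux/mfd/tmio.h side of that change is not part of this hunk, so the sketch below only illustrates, with placeholder callback bodies and an invented clock rate, how a board's platform data could plausibly supply those hooks.

#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>

/* Placeholder callbacks -- a real board would drive its card-power
 * regulator/GPIO and its SD clock divider hardware here. */
static void board_sd_set_pwr(struct platform_device *pdev, int state)
{
}

static void board_sd_set_clk_div(struct platform_device *pdev, int state)
{
}

static struct tmio_mmc_data board_sd_data = {
	.hclk		= 24000000,	/* illustrative bus clock */
	.set_pwr	= board_sd_set_pwr,
	.set_clk_div	= board_sd_set_clk_div,
};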
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 4c364d44ad59..2de0cc823d60 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -549,4 +549,21 @@ config MTD_VMU
549 To build this as a module select M here, the module will be called 549 To build this as a module select M here, the module will be called
550 vmu-flash. 550 vmu-flash.
551 551
552config MTD_PISMO
553 tristate "MTD discovery driver for PISMO modules"
554 depends on I2C
555 depends on ARCH_VERSATILE
556 help
557 This driver allows for discovery of PISMO modules - see
558 <http://www.pismoworld.org/>. These are small modules containing
559 up to five memory devices (eg, SRAM, flash, DOC) described by an
560 I2C EEPROM.
561
562 This driver does not create any MTD maps itself; instead it
563 creates MTD physmap and MTD SRAM platform devices. If you
564 enable this option, you should consider enabling MTD_PHYSMAP
565 and/or MTD_PLATRAM according to the devices on your module.
566
567 When built as a module, it will be called pismo.ko
568
552endmenu 569endmenu
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
new file mode 100644
index 000000000000..c48cad271f5d
--- /dev/null
+++ b/drivers/mtd/maps/pismo.c
@@ -0,0 +1,320 @@
1/*
2 * PISMO memory driver - http://www.pismoworld.org/
3 *
4 * For ARM Realview and Versatile platforms
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License.
9 */
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/i2c.h>
13#include <linux/platform_device.h>
14#include <linux/spinlock.h>
15#include <linux/mutex.h>
16#include <linux/mtd/physmap.h>
17#include <linux/mtd/plat-ram.h>
18#include <linux/mtd/pismo.h>
19
20#define PISMO_NUM_CS 5
21
22struct pismo_cs_block {
23 u8 type;
24 u8 width;
25 __le16 access;
26 __le32 size;
27 u32 reserved[2];
28 char device[32];
29} __packed;
30
31struct pismo_eeprom {
32 struct pismo_cs_block cs[PISMO_NUM_CS];
33 char board[15];
34 u8 sum;
35} __packed;
36
37struct pismo_mem {
38 phys_addr_t base;
39 u32 size;
40 u16 access;
41 u8 width;
42 u8 type;
43};
44
45struct pismo_data {
46 struct i2c_client *client;
47 void (*vpp)(void *, int);
48 void *vpp_data;
49 struct platform_device *dev[PISMO_NUM_CS];
50};
51
52/* FIXME: set_vpp could do with a better calling convention */
53static struct pismo_data *vpp_pismo;
54static DEFINE_MUTEX(pismo_mutex);
55
56static int pismo_setvpp_probe_fix(struct pismo_data *pismo)
57{
58 mutex_lock(&pismo_mutex);
59 if (vpp_pismo) {
60 mutex_unlock(&pismo_mutex);
61 kfree(pismo);
62 return -EBUSY;
63 }
64 vpp_pismo = pismo;
65 mutex_unlock(&pismo_mutex);
66 return 0;
67}
68
69static void pismo_setvpp_remove_fix(struct pismo_data *pismo)
70{
71 mutex_lock(&pismo_mutex);
72 if (vpp_pismo == pismo)
73 vpp_pismo = NULL;
74 mutex_unlock(&pismo_mutex);
75}
76
77static void pismo_set_vpp(struct map_info *map, int on)
78{
79 struct pismo_data *pismo = vpp_pismo;
80
81 pismo->vpp(pismo->vpp_data, on);
82}
83/* end of hack */
84
85
86static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
87{
88 width &= 15;
89 if (width > 2)
90 return 0;
91 return 1 << width;
92}
93
94static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
95 u8 addr, size_t size)
96{
97 int ret;
98 struct i2c_msg msg[] = {
99 {
100 .addr = client->addr,
101 .len = sizeof(addr),
102 .buf = &addr,
103 }, {
104 .addr = client->addr,
105 .flags = I2C_M_RD,
106 .len = size,
107 .buf = buf,
108 },
109 };
110
111 ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
112
113 return ret == ARRAY_SIZE(msg) ? size : -EIO;
114}
115
116static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
117 struct pismo_mem *region, const char *name, void *pdata, size_t psize)
118{
119 struct platform_device *dev;
120 struct resource res = { };
121	phys_addr_t base = region->base;
122 int ret;
123
124 if (base == ~0)
125 return -ENXIO;
126
127 res.start = base;
128 res.end = base + region->size - 1;
129 res.flags = IORESOURCE_MEM;
130
131 dev = platform_device_alloc(name, i);
132 if (!dev)
133 return -ENOMEM;
134 dev->dev.parent = &pismo->client->dev;
135
136 do {
137 ret = platform_device_add_resources(dev, &res, 1);
138 if (ret)
139 break;
140
141 ret = platform_device_add_data(dev, pdata, psize);
142 if (ret)
143 break;
144
145 ret = platform_device_add(dev);
146 if (ret)
147 break;
148
149 pismo->dev[i] = dev;
150 return 0;
151 } while (0);
152
153 platform_device_put(dev);
154 return ret;
155}
156
157static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
158 struct pismo_mem *region)
159{
160 struct physmap_flash_data data = {
161 .width = region->width,
162 };
163
164 if (pismo->vpp)
165 data.set_vpp = pismo_set_vpp;
166
167 return pismo_add_device(pismo, i, region, "physmap-flash",
168 &data, sizeof(data));
169}
170
171static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
172 struct pismo_mem *region)
173{
174 struct platdata_mtd_ram data = {
175 .bankwidth = region->width,
176 };
177
178 return pismo_add_device(pismo, i, region, "mtd-ram",
179 &data, sizeof(data));
180}
181
182static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
183 const struct pismo_cs_block *cs, phys_addr_t base)
184{
185 struct device *dev = &pismo->client->dev;
186 struct pismo_mem region;
187
188 region.base = base;
189 region.type = cs->type;
190 region.width = pismo_width_to_bytes(cs->width);
191 region.access = le16_to_cpu(cs->access);
192 region.size = le32_to_cpu(cs->size);
193
194 if (region.width == 0) {
195 dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
196 return;
197 }
198
199 /*
200	 * FIXME: may need to set up the platform's memory controller here,
201	 * but at the moment we assume that it has already been set up
202	 * correctly. The memory controller could also tell us the base address.
203 */
204
205 dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n",
206 i, cs->device, region.type, region.access, region.size / 1024);
207
208 switch (region.type) {
209 case 0:
210 break;
211 case 1:
212 /* static DOC */
213 break;
214 case 2:
215 /* static NOR */
216 pismo_add_nor(pismo, i, &region);
217 break;
218 case 3:
219 /* static RAM */
220 pismo_add_sram(pismo, i, &region);
221 break;
222 }
223}
224
225static int __devexit pismo_remove(struct i2c_client *client)
226{
227 struct pismo_data *pismo = i2c_get_clientdata(client);
228 int i;
229
230 for (i = 0; i < ARRAY_SIZE(pismo->dev); i++)
231 platform_device_unregister(pismo->dev[i]);
232
233 /* FIXME: set_vpp needs saner arguments */
234 pismo_setvpp_remove_fix(pismo);
235
236 kfree(pismo);
237
238 return 0;
239}
240
241static int __devinit pismo_probe(struct i2c_client *client,
242 const struct i2c_device_id *id)
243{
244 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
245 struct pismo_pdata *pdata = client->dev.platform_data;
246 struct pismo_eeprom eeprom;
247 struct pismo_data *pismo;
248 int ret, i;
249
250 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
251 dev_err(&client->dev, "functionality mismatch\n");
252 return -EIO;
253 }
254
255 pismo = kzalloc(sizeof(*pismo), GFP_KERNEL);
256 if (!pismo)
257 return -ENOMEM;
258
259 /* FIXME: set_vpp needs saner arguments */
260 ret = pismo_setvpp_probe_fix(pismo);
261 if (ret)
262 return ret;
263
264 pismo->client = client;
265 if (pdata) {
266 pismo->vpp = pdata->set_vpp;
267 pismo->vpp_data = pdata->vpp_data;
268 }
269 i2c_set_clientdata(client, pismo);
270
271 ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
272 if (ret < 0) {
273 dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
274 return ret;
275 }
276
277 dev_info(&client->dev, "%.15s board found\n", eeprom.board);
278
279 for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
280 if (eeprom.cs[i].type != 0xff)
281 pismo_add_one(pismo, i, &eeprom.cs[i],
282 pdata->cs_addrs[i]);
283
284 return 0;
285}
286
287static const struct i2c_device_id pismo_id[] = {
288 { "pismo" },
289 { },
290};
291MODULE_DEVICE_TABLE(i2c, pismo_id);
292
293static struct i2c_driver pismo_driver = {
294 .driver = {
295 .name = "pismo",
296 .owner = THIS_MODULE,
297 },
298 .probe = pismo_probe,
299 .remove = __devexit_p(pismo_remove),
300 .id_table = pismo_id,
301};
302
303static int __init pismo_init(void)
304{
305 BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48);
306 BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256);
307
308 return i2c_add_driver(&pismo_driver);
309}
310module_init(pismo_init);
311
312static void __exit pismo_exit(void)
313{
314 i2c_del_driver(&pismo_driver);
315}
316module_exit(pismo_exit);
317
318MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>");
319MODULE_DESCRIPTION("PISMO memory driver");
320MODULE_LICENSE("GPL");
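As a usage sketch (not part of the patch itself): a board file hands this driver its platform data through struct pismo_pdata and i2c_register_board_info(). The set_vpp, vpp_data and cs_addrs field names come from the probe code above; the bus number, I2C address and chip-select base addresses below are invented placeholders.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mtd/pismo.h>

static struct pismo_pdata board_pismo_pdata = {
	/* Board-specific chip-select bases; ~0 marks an unwired slot,
	 * matching the base == ~0 check in pismo_add_device(). */
	.cs_addrs = { 0x40000000, 0x44000000, ~0, ~0, ~0 },
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("pismo", 0x50),
		.platform_data = &board_pismo_pdata,
	},
};

static void __init board_add_pismo(void)
{
	i2c_register_board_info(0, board_i2c_devs, ARRAY_SIZE(board_i2c_devs));
}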
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index a714ec482761..92e12df0917f 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -322,7 +322,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
322 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); 322 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
323 323
324 /* Panics must be written immediately */ 324 /* Panics must be written immediately */
325 if (reason == KMSG_DUMP_PANIC) { 325 if (reason != KMSG_DUMP_OOPS) {
326 if (!cxt->mtd->panic_write) 326 if (!cxt->mtd->panic_write)
327 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); 327 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
328 else 328 else
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 79fc4530987b..25c5dd03a837 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -147,6 +147,10 @@ static int scan_for_bad_eraseblocks(void)
147 } 147 }
148 memset(bbt, 0 , ebcnt); 148 memset(bbt, 0 , ebcnt);
149 149
150 /* NOR flash does not implement block_isbad */
151 if (mtd->block_isbad == NULL)
152 return 0;
153
150 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 154 printk(PRINT_PREF "scanning for bad eraseblocks\n");
151 for (i = 0; i < ebcnt; ++i) { 155 for (i = 0; i < ebcnt; ++i) {
152 bbt[i] = is_block_bad(i) ? 1 : 0; 156 bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -184,7 +188,7 @@ static int __init mtd_readtest_init(void)
184 tmp = mtd->size; 188 tmp = mtd->size;
185 do_div(tmp, mtd->erasesize); 189 do_div(tmp, mtd->erasesize);
186 ebcnt = tmp; 190 ebcnt = tmp;
187 pgcnt = mtd->erasesize / mtd->writesize; 191 pgcnt = mtd->erasesize / pgsize;
188 192
189 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 193 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
190 "page size %u, count of eraseblocks %u, pages per " 194 "page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 141363a7e805..7fbb51d4eabe 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -301,6 +301,10 @@ static int scan_for_bad_eraseblocks(void)
301 } 301 }
302 memset(bbt, 0 , ebcnt); 302 memset(bbt, 0 , ebcnt);
303 303
304 /* NOR flash does not implement block_isbad */
305 if (mtd->block_isbad == NULL)
306 goto out;
307
304 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 308 printk(PRINT_PREF "scanning for bad eraseblocks\n");
305 for (i = 0; i < ebcnt; ++i) { 309 for (i = 0; i < ebcnt; ++i) {
306 bbt[i] = is_block_bad(i) ? 1 : 0; 310 bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -309,6 +313,7 @@ static int scan_for_bad_eraseblocks(void)
309 cond_resched(); 313 cond_resched();
310 } 314 }
311 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 315 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
316out:
312 goodebcnt = ebcnt - bad; 317 goodebcnt = ebcnt - bad;
313 return 0; 318 return 0;
314} 319}
@@ -340,7 +345,7 @@ static int __init mtd_speedtest_init(void)
340 tmp = mtd->size; 345 tmp = mtd->size;
341 do_div(tmp, mtd->erasesize); 346 do_div(tmp, mtd->erasesize);
342 ebcnt = tmp; 347 ebcnt = tmp;
343 pgcnt = mtd->erasesize / mtd->writesize; 348 pgcnt = mtd->erasesize / pgsize;
344 349
345 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 350 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
346 "page size %u, count of eraseblocks %u, pages per " 351 "page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 63920476b57a..a99d3cd737d8 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -227,6 +227,10 @@ static int scan_for_bad_eraseblocks(void)
227 } 227 }
228 memset(bbt, 0 , ebcnt); 228 memset(bbt, 0 , ebcnt);
229 229
230 /* NOR flash does not implement block_isbad */
231 if (mtd->block_isbad == NULL)
232 return 0;
233
230 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 234 printk(PRINT_PREF "scanning for bad eraseblocks\n");
231 for (i = 0; i < ebcnt; ++i) { 235 for (i = 0; i < ebcnt; ++i) {
232 bbt[i] = is_block_bad(i) ? 1 : 0; 236 bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -265,7 +269,7 @@ static int __init mtd_stresstest_init(void)
265 tmp = mtd->size; 269 tmp = mtd->size;
266 do_div(tmp, mtd->erasesize); 270 do_div(tmp, mtd->erasesize);
267 ebcnt = tmp; 271 ebcnt = tmp;
268 pgcnt = mtd->erasesize / mtd->writesize; 272 pgcnt = mtd->erasesize / pgsize;
269 273
270 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 274 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
271 "page size %u, count of eraseblocks %u, pages per " 275 "page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f237ddbb2713..111ea41c4ecd 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -853,7 +853,6 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
853 break; 853 break;
854 } 854 }
855 855
856 req.name[req.name_len] = '\0';
857 err = verify_mkvol_req(ubi, &req); 856 err = verify_mkvol_req(ubi, &req);
858 if (err) 857 if (err)
859 break; 858 break;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 277786ebaa2c..1361574e2b00 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -291,8 +291,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
291 */ 291 */
292struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) 292struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
293{ 293{
294 int error, ubi_num, vol_id; 294 int error, ubi_num, vol_id, mod;
295 struct ubi_volume_desc *ret;
296 struct inode *inode; 295 struct inode *inode;
297 struct path path; 296 struct path path;
298 297
@@ -306,16 +305,16 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
306 return ERR_PTR(error); 305 return ERR_PTR(error);
307 306
308 inode = path.dentry->d_inode; 307 inode = path.dentry->d_inode;
308 mod = inode->i_mode;
309 ubi_num = ubi_major2num(imajor(inode)); 309 ubi_num = ubi_major2num(imajor(inode));
310 vol_id = iminor(inode) - 1; 310 vol_id = iminor(inode) - 1;
311 path_put(&path);
311 312
313 if (!S_ISCHR(mod))
314 return ERR_PTR(-EINVAL);
312 if (vol_id >= 0 && ubi_num >= 0) 315 if (vol_id >= 0 && ubi_num >= 0)
313 ret = ubi_open_volume(ubi_num, vol_id, mode); 316 return ubi_open_volume(ubi_num, vol_id, mode);
314 else 317 return ERR_PTR(-ENODEV);
315 ret = ERR_PTR(-ENODEV);
316
317 path_put(&path);
318 return ret;
319} 318}
320EXPORT_SYMBOL_GPL(ubi_open_volume_path); 319EXPORT_SYMBOL_GPL(ubi_open_volume_path);
321 320
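With the reworked ubi_open_volume_path() above, a kernel-side caller uses the helper roughly as follows; the path string is an invented example and must name a UBI volume character device, otherwise the new S_ISCHR check returns -EINVAL.

#include <linux/err.h>
#include <linux/mtd/ubi.h>

static int example_open_ubi_volume(void)
{
	struct ubi_volume_desc *desc;

	desc = ubi_open_volume_path("/dev/ubi0_0", UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* e.g. -EINVAL for a non-chardev path */

	/* ... access the volume via ubi_read() etc. ... */

	ubi_close_volume(desc);
	return 0;
}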
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index c1d7b880c795..425bf5a3edd4 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -155,6 +155,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
155 if (err) 155 if (err)
156 return err; 156 return err;
157 vol->updating = 0; 157 vol->updating = 0;
158 return 0;
158 } 159 }
159 160
160 vol->upd_buf = vmalloc(ubi->leb_size); 161 vol->upd_buf = vmalloc(ubi->leb_size);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 1afc61e7455d..40044028d682 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -566,6 +566,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
566 vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); 566 vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
567 vol->alignment = be32_to_cpu(vtbl[i].alignment); 567 vol->alignment = be32_to_cpu(vtbl[i].alignment);
568 vol->data_pad = be32_to_cpu(vtbl[i].data_pad); 568 vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
569 vol->upd_marker = vtbl[i].upd_marker;
569 vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? 570 vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
570 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; 571 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
571 vol->name_len = be16_to_cpu(vtbl[i].name_len); 572 vol->name_len = be16_to_cpu(vtbl[i].name_len);
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fbc231153e55..77cf0901a441 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -56,6 +56,7 @@ static const char version[] =
56#include <linux/errno.h> 56#include <linux/errno.h>
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/etherdevice.h> 58#include <linux/etherdevice.h>
59#include <linux/if_ether.h>
59#include <linux/skbuff.h> 60#include <linux/skbuff.h>
60#include <linux/slab.h> 61#include <linux/slab.h>
61#include <linux/init.h> 62#include <linux/init.h>
@@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev)
734 memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); 735 memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
735 736
736 /* Fill in the station address. */ 737 /* Fill in the station address. */
737 memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, 738 memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
738 sizeof(dev->dev_addr));
739 739
740 /* The Tx-block list is written as needed. We just set up the values. */ 740 /* The Tx-block list is written as needed. We just set up the values. */
741 lp->tx_cmd_link = IDLELOOP + 4; 741 lp->tx_cmd_link = IDLELOOP + 4;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e58a65391ad2..dd9a09c72dff 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2346,6 +2346,7 @@ config GELIC_NET
2346 2346
2347config GELIC_WIRELESS 2347config GELIC_WIRELESS
2348 bool "PS3 Wireless support" 2348 bool "PS3 Wireless support"
2349 depends on WLAN
2349 depends on GELIC_NET 2350 depends on GELIC_NET
2350 select WIRELESS_EXT 2351 select WIRELESS_EXT
2351 help 2352 help
@@ -2358,6 +2359,7 @@ config GELIC_WIRELESS
2358config GELIC_WIRELESS_OLD_PSK_INTERFACE 2359config GELIC_WIRELESS_OLD_PSK_INTERFACE
2359 bool "PS3 Wireless private PSK interface (OBSOLETE)" 2360 bool "PS3 Wireless private PSK interface (OBSOLETE)"
2360 depends on GELIC_WIRELESS 2361 depends on GELIC_WIRELESS
2362 select WEXT_PRIV
2361 help 2363 help
2362 This option retains the obsolete private interface to pass 2364 This option retains the obsolete private interface to pass
2363 the PSK from user space programs to the driver. The PSK 2365 the PSK from user space programs to the driver. The PSK
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index c37ee9e6b67b..39e1c0d39476 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -68,6 +68,7 @@ config W90P910_ETH
68 tristate "Nuvoton w90p910 Ethernet support" 68 tristate "Nuvoton w90p910 Ethernet support"
69 depends on ARM && ARCH_W90X900 69 depends on ARM && ARCH_W90X900
70 select PHYLIB 70 select PHYLIB
71 select MII
71 help 72 help
72 Say Y here if you want to use built-in Ethernet ports 73 Say Y here if you want to use built-in Ethernet ports
73 on w90p910 processor. 74 on w90p910 processor.
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index c5721cb38265..cc9ed8643910 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -663,7 +663,7 @@ static int lance_open( struct net_device *dev )
663 while (--i > 0) 663 while (--i > 0)
664 if (DREG & CSR0_IDON) 664 if (DREG & CSR0_IDON)
665 break; 665 break;
666 if (i < 0 || (DREG & CSR0_ERR)) { 666 if (i <= 0 || (DREG & CSR0_ERR)) {
667 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", 667 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
668 dev->name, i, DREG )); 668 dev->name, i, DREG ));
669 DREG = CSR0_STOP; 669 DREG = CSR0_STOP;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index c0451d75cdcf..ec52529394ad 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1959,12 +1959,15 @@ static int atl2_get_eeprom(struct net_device *netdev,
1959 return -ENOMEM; 1959 return -ENOMEM;
1960 1960
1961 for (i = first_dword; i < last_dword; i++) { 1961 for (i = first_dword; i < last_dword; i++) {
1962 if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) 1962 if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
1963 return -EIO; 1963 ret_val = -EIO;
1964 goto free;
1965 }
1964 } 1966 }
1965 1967
1966 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), 1968 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
1967 eeprom->len); 1969 eeprom->len);
1970free:
1968 kfree(eeprom_buff); 1971 kfree(eeprom_buff);
1969 1972
1970 return ret_val; 1973 return ret_val;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..1dd4403247ca 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
921 size = (res->end - res->start) + 1; 921 size = (res->end - res->start) + 1;
922 922
923 ax->mem2 = request_mem_region(res->start, size, pdev->name); 923 ax->mem2 = request_mem_region(res->start, size, pdev->name);
924 if (ax->mem == NULL) { 924 if (ax->mem2 == NULL) {
925 dev_err(&pdev->dev, "cannot reserve registers\n"); 925 dev_err(&pdev->dev, "cannot reserve registers\n");
926 ret = -ENXIO; 926 ret = -ENXIO;
927 goto exit_mem1; 927 goto exit_mem1;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 9e56014d27ed..5bc74590c73e 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -275,8 +275,14 @@ struct be_adapter {
275 u32 tx_fc; /* Tx flow control */ 275 u32 tx_fc; /* Tx flow control */
276 int link_speed; 276 int link_speed;
277 u8 port_type; 277 u8 port_type;
278 u8 transceiver;
279 u8 generation; /* BladeEngine ASIC generation */
278}; 280};
279 281
282/* BladeEngine Generation numbers */
283#define BE_GEN2 2
284#define BE_GEN3 3
285
280extern const struct ethtool_ops be_ethtool_ops; 286extern const struct ethtool_ops be_ethtool_ops;
281 287
282#define drvr_stats(adapter) (&adapter->stats.drvr_stats) 288#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1b68bd98dc0c..006cb2efcd22 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
286 MCC_WRB_SGE_CNT_SHIFT; 286 MCC_WRB_SGE_CNT_SHIFT;
287 wrb->payload_length = payload_len; 287 wrb->payload_length = payload_len;
288 wrb->tag0 = opcode; 288 wrb->tag0 = opcode;
289 be_dws_cpu_to_le(wrb, 20); 289 be_dws_cpu_to_le(wrb, 8);
290} 290}
291 291
292/* Don't touch the hdr after it's prepared */ 292/* Don't touch the hdr after it's prepared */
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
296 req_hdr->opcode = opcode; 296 req_hdr->opcode = opcode;
297 req_hdr->subsystem = subsystem; 297 req_hdr->subsystem = subsystem;
298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
299 req_hdr->version = 0;
299} 300}
300 301
301static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 302static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@ -1479,6 +1480,41 @@ err:
1479 return status; 1480 return status;
1480} 1481}
1481 1482
1483int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1484 u8 loopback_type, u8 enable)
1485{
1486 struct be_mcc_wrb *wrb;
1487 struct be_cmd_req_set_lmode *req;
1488 int status;
1489
1490 spin_lock_bh(&adapter->mcc_lock);
1491
1492 wrb = wrb_from_mccq(adapter);
1493 if (!wrb) {
1494 status = -EBUSY;
1495 goto err;
1496 }
1497
1498 req = embedded_payload(wrb);
1499
1500 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1501 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1502
1503 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1504 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1505 sizeof(*req));
1506
1507 req->src_port = port_num;
1508 req->dest_port = port_num;
1509 req->loopback_type = loopback_type;
1510 req->loopback_state = enable;
1511
1512 status = be_mcc_notify_wait(adapter);
1513err:
1514 spin_unlock_bh(&adapter->mcc_lock);
1515 return status;
1516}
1517
1482int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 1518int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1483 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) 1519 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1484{ 1520{
@@ -1501,6 +1537,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1501 1537
1502 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1538 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1503 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); 1539 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1540 req->hdr.timeout = 4;
1504 1541
1505 req->pattern = cpu_to_le64(pattern); 1542 req->pattern = cpu_to_le64(pattern);
1506 req->src_port = cpu_to_le32(port_num); 1543 req->src_port = cpu_to_le32(port_num);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 92b87ef156ed..13b33c841083 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -155,6 +155,7 @@ struct be_mcc_mailbox {
155 155
156#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 156#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
157#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 157#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
158#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
158 159
159struct be_cmd_req_hdr { 160struct be_cmd_req_hdr {
160 u8 opcode; /* dword 0 */ 161 u8 opcode; /* dword 0 */
@@ -163,7 +164,8 @@ struct be_cmd_req_hdr {
163 u8 domain; /* dword 0 */ 164 u8 domain; /* dword 0 */
164 u32 timeout; /* dword 1 */ 165 u32 timeout; /* dword 1 */
165 u32 request_length; /* dword 2 */ 166 u32 request_length; /* dword 2 */
166 u32 rsvd; /* dword 3 */ 167 u8 version; /* dword 3 */
168 u8 rsvd[3]; /* dword 3 */
167}; 169};
168 170
169#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ 171#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
@@ -821,6 +823,19 @@ struct be_cmd_resp_loopback_test {
821 u32 ticks_compl; 823 u32 ticks_compl;
822}; 824};
823 825
826struct be_cmd_req_set_lmode {
827 struct be_cmd_req_hdr hdr;
828 u8 src_port;
829 u8 dest_port;
830 u8 loopback_type;
831 u8 loopback_state;
832};
833
834struct be_cmd_resp_set_lmode {
835 struct be_cmd_resp_hdr resp_hdr;
836 u8 rsvd0[4];
837};
838
824/********************** DDR DMA test *********************/ 839/********************** DDR DMA test *********************/
825struct be_cmd_req_ddrdma_test { 840struct be_cmd_req_ddrdma_test {
826 struct be_cmd_req_hdr hdr; 841 struct be_cmd_req_hdr hdr;
@@ -912,3 +927,5 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
912 u32 num_pkts, u64 pattern); 927 u32 num_pkts, u64 pattern);
913extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 928extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
914 u32 byte_cnt, struct be_dma_mem *cmd); 929 u32 byte_cnt, struct be_dma_mem *cmd);
930extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
931 u8 loopback_type, u8 enable);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 298b92cbd689..5d001c4deac1 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -118,6 +118,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
118#define BE_MAC_LOOPBACK 0x0 118#define BE_MAC_LOOPBACK 0x0
119#define BE_PHY_LOOPBACK 0x1 119#define BE_PHY_LOOPBACK 0x1
120#define BE_ONE_PORT_EXT_LOOPBACK 0x2 120#define BE_ONE_PORT_EXT_LOOPBACK 0x2
121#define BE_NO_LOOPBACK 0xff
121 122
122static void 123static void
123be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 124be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -339,28 +340,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
339 340
340 status = be_cmd_read_port_type(adapter, adapter->port_num, 341 status = be_cmd_read_port_type(adapter, adapter->port_num,
341 &connector); 342 &connector);
342 switch (connector) { 343 if (!status) {
343 case 7: 344 switch (connector) {
344 ecmd->port = PORT_FIBRE; 345 case 7:
345 break; 346 ecmd->port = PORT_FIBRE;
346 default: 347 ecmd->transceiver = XCVR_EXTERNAL;
347 ecmd->port = PORT_TP; 348 break;
348 break; 349 case 0:
350 ecmd->port = PORT_TP;
351 ecmd->transceiver = XCVR_EXTERNAL;
352 break;
353 default:
354 ecmd->port = PORT_TP;
355 ecmd->transceiver = XCVR_INTERNAL;
356 break;
357 }
358 } else {
359 ecmd->port = PORT_AUI;
360 ecmd->transceiver = XCVR_INTERNAL;
349 } 361 }
350 362
351 /* Save for future use */ 363 /* Save for future use */
352 adapter->link_speed = ecmd->speed; 364 adapter->link_speed = ecmd->speed;
353 adapter->port_type = ecmd->port; 365 adapter->port_type = ecmd->port;
366 adapter->transceiver = ecmd->transceiver;
354 } else { 367 } else {
355 ecmd->speed = adapter->link_speed; 368 ecmd->speed = adapter->link_speed;
356 ecmd->port = adapter->port_type; 369 ecmd->port = adapter->port_type;
370 ecmd->transceiver = adapter->transceiver;
357 } 371 }
358 372
359 ecmd->duplex = DUPLEX_FULL; 373 ecmd->duplex = DUPLEX_FULL;
360 ecmd->autoneg = AUTONEG_DISABLE; 374 ecmd->autoneg = AUTONEG_DISABLE;
361 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
362 ecmd->phy_address = adapter->port_num; 375 ecmd->phy_address = adapter->port_num;
363 ecmd->transceiver = XCVR_INTERNAL; 376 switch (ecmd->port) {
377 case PORT_FIBRE:
378 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
379 break;
380 case PORT_TP:
381 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
382 break;
383 case PORT_AUI:
384 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
385 break;
386 }
364 387
365 return 0; 388 return 0;
366} 389}
@@ -489,6 +512,19 @@ err:
489 return ret; 512 return ret;
490} 513}
491 514
515static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
516 u64 *status)
517{
518 be_cmd_set_loopback(adapter, adapter->port_num,
519 loopback_type, 1);
520 *status = be_cmd_loopback_test(adapter, adapter->port_num,
521 loopback_type, 1500,
522 2, 0xabc);
523 be_cmd_set_loopback(adapter, adapter->port_num,
524 BE_NO_LOOPBACK, 1);
525 return *status;
526}
527
492static void 528static void
493be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 529be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
494{ 530{
@@ -497,23 +533,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
497 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 533 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
498 534
499 if (test->flags & ETH_TEST_FL_OFFLINE) { 535 if (test->flags & ETH_TEST_FL_OFFLINE) {
500 data[0] = be_cmd_loopback_test(adapter, adapter->port_num, 536 if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
501 BE_MAC_LOOPBACK, 1500, 537 &data[0]) != 0) {
502 2, 0xabc);
503 if (data[0] != 0)
504 test->flags |= ETH_TEST_FL_FAILED; 538 test->flags |= ETH_TEST_FL_FAILED;
505 539 }
506 data[1] = be_cmd_loopback_test(adapter, adapter->port_num, 540 if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
507 BE_PHY_LOOPBACK, 1500, 541 &data[1]) != 0) {
508 2, 0xabc);
509 if (data[1] != 0)
510 test->flags |= ETH_TEST_FL_FAILED; 542 test->flags |= ETH_TEST_FL_FAILED;
511 543 }
512 data[2] = be_cmd_loopback_test(adapter, adapter->port_num, 544 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
513 BE_ONE_PORT_EXT_LOOPBACK, 545 &data[2]) != 0) {
514 1500, 2, 0xabc);
515 if (data[2] != 0)
516 test->flags |= ETH_TEST_FL_FAILED; 546 test->flags |= ETH_TEST_FL_FAILED;
547 }
517 548
518 data[3] = be_test_ddr_dma(adapter); 549 data[3] = be_test_ddr_dma(adapter);
519 if (data[3] != 0) 550 if (data[3] != 0)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3a1f7902c16d..626b76c0ebc7 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size)
910static void be_post_rx_frags(struct be_adapter *adapter) 910static void be_post_rx_frags(struct be_adapter *adapter)
911{ 911{
912 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; 912 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
913 struct be_rx_page_info *page_info = NULL; 913 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
914 struct be_queue_info *rxq = &adapter->rx_obj.q; 914 struct be_queue_info *rxq = &adapter->rx_obj.q;
915 struct page *pagep = NULL; 915 struct page *pagep = NULL;
916 struct be_eth_rx_d *rxd; 916 struct be_eth_rx_d *rxd;
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
941 rxd = queue_head_node(rxq); 941 rxd = queue_head_node(rxq);
942 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); 942 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
943 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); 943 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
944 queue_head_inc(rxq);
945 944
946 /* Any space left in the current big page for another frag? */ 945 /* Any space left in the current big page for another frag? */
947 if ((page_offset + rx_frag_size + rx_frag_size) > 946 if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
949 pagep = NULL; 948 pagep = NULL;
950 page_info->last_page_user = true; 949 page_info->last_page_user = true;
951 } 950 }
951
952 prev_page_info = page_info;
953 queue_head_inc(rxq);
952 page_info = &page_info_tbl[rxq->head]; 954 page_info = &page_info_tbl[rxq->head];
953 } 955 }
954 if (pagep) 956 if (pagep)
955 page_info->last_page_user = true; 957 prev_page_info->last_page_user = true;
956 958
957 if (posted) { 959 if (posted) {
958 atomic_add(posted, &rxq->used); 960 atomic_add(posted, &rxq->used);
@@ -1348,7 +1350,7 @@ static irqreturn_t be_intx(int irq, void *dev)
1348 int isr; 1350 int isr;
1349 1351
1350 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1352 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1351 be_pci_func(adapter) * CEV_ISR_SIZE); 1353 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
1352 if (!isr) 1354 if (!isr)
1353 return IRQ_NONE; 1355 return IRQ_NONE;
1354 1356
@@ -2049,6 +2051,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
2049static int be_map_pci_bars(struct be_adapter *adapter) 2051static int be_map_pci_bars(struct be_adapter *adapter)
2050{ 2052{
2051 u8 __iomem *addr; 2053 u8 __iomem *addr;
2054 int pcicfg_reg;
2052 2055
2053 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2056 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2054 pci_resource_len(adapter->pdev, 2)); 2057 pci_resource_len(adapter->pdev, 2));
@@ -2062,8 +2065,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2062 goto pci_map_err; 2065 goto pci_map_err;
2063 adapter->db = addr; 2066 adapter->db = addr;
2064 2067
2065 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), 2068 if (adapter->generation == BE_GEN2)
2066 pci_resource_len(adapter->pdev, 1)); 2069 pcicfg_reg = 1;
2070 else
2071 pcicfg_reg = 0;
2072
2073 addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
2074 pci_resource_len(adapter->pdev, pcicfg_reg));
2067 if (addr == NULL) 2075 if (addr == NULL)
2068 goto pci_map_err; 2076 goto pci_map_err;
2069 adapter->pcicfg = addr; 2077 adapter->pcicfg = addr;
@@ -2160,6 +2168,7 @@ static int be_stats_init(struct be_adapter *adapter)
2160 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2168 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2161 if (cmd->va == NULL) 2169 if (cmd->va == NULL)
2162 return -1; 2170 return -1;
2171 memset(cmd->va, 0, cmd->size);
2163 return 0; 2172 return 0;
2164} 2173}
2165 2174
@@ -2238,6 +2247,20 @@ static int __devinit be_probe(struct pci_dev *pdev,
2238 goto rel_reg; 2247 goto rel_reg;
2239 } 2248 }
2240 adapter = netdev_priv(netdev); 2249 adapter = netdev_priv(netdev);
2250
2251 switch (pdev->device) {
2252 case BE_DEVICE_ID1:
2253 case OC_DEVICE_ID1:
2254 adapter->generation = BE_GEN2;
2255 break;
2256 case BE_DEVICE_ID2:
2257 case OC_DEVICE_ID2:
2258 adapter->generation = BE_GEN3;
2259 break;
2260 default:
2261 adapter->generation = 0;
2262 }
2263
2241 adapter->pdev = pdev; 2264 adapter->pdev = pdev;
2242 pci_set_drvdata(pdev, adapter); 2265 pci_set_drvdata(pdev, adapter);
2243 adapter->netdev = netdev; 2266 adapter->netdev = netdev;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 8ffea3990d07..0b23bc4f56c6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
33#include <asm/dma.h> 33#include <asm/dma.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35 35
36#include <asm/dpmc.h>
36#include <asm/blackfin.h> 37#include <asm/blackfin.h>
37#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
38#include <asm/portmux.h> 39#include <asm/portmux.h>
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev)
386 u32 sclk, mdc_div; 387 u32 sclk, mdc_div;
387 388
388 /* Enable PHY output early */ 389 /* Enable PHY output early */
389 if (!(bfin_read_VR_CTL() & PHYCLKOE)) 390 if (!(bfin_read_VR_CTL() & CLKBUFOE))
390 bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); 391 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
391 392
392 sclk = get_sclk(); 393 sclk = get_sclk();
393 mdc_div = ((sclk / MDC_CLK) / 2) - 1; 394 mdc_div = ((sclk / MDC_CLK) / 2) - 1;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 77ba13520d87..306c2b8165e2 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7593 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { 7593 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7594 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 7594 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7595 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; 7595 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7596 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7597 CNIC_SB_ID(bp));
7596 } 7598 }
7597 mutex_unlock(&bp->cnic_mutex); 7599 mutex_unlock(&bp->cnic_mutex);
7598#endif 7600#endif
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0fb7a4964e75..822f586d72af 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
1580 // check if any partner replies 1580 // check if any partner replies
1581 if (best->is_individual) { 1581 if (best->is_individual) {
1582 pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", 1582 pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
1583 best->slave->dev->master->name); 1583 best->slave ? best->slave->dev->master->name : "NULL");
1584 } 1584 }
1585 1585
1586 best->is_active = 1; 1586 best->is_active = 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3f0071cfe56b..efa0e41bf3ec 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3639,7 +3639,7 @@ static int bond_open(struct net_device *bond_dev)
3639 */ 3639 */
3640 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) { 3640 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
3641 /* something went wrong - fail the open operation */ 3641 /* something went wrong - fail the open operation */
3642 return -1; 3642 return -ENOMEM;
3643 } 3643 }
3644 3644
3645 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 3645 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 9c5a1537939c..1a72ca066a17 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -990,7 +990,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
990 goto error_tx_buf; 990 goto error_tx_buf;
991 } 991 }
992 priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL); 992 priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
993 if (!priv->spi_tx_buf) { 993 if (!priv->spi_rx_buf) {
994 ret = -ENOMEM; 994 ret = -ENOMEM;
995 goto error_rx_buf; 995 goto error_rx_buf;
996 } 996 }
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index af9321617ce4..0e79cef95c0a 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1325,8 +1325,7 @@ net_open(struct net_device *dev)
1325 write_irq(dev, lp->chip_type, dev->irq); 1325 write_irq(dev, lp->chip_type, dev->irq);
1326 ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev); 1326 ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
1327 if (ret) { 1327 if (ret) {
1328 if (net_debug) 1328 printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
1329 printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
1330 goto bad_out; 1329 goto bad_out;
1331 } 1330 }
1332 } 1331 }
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..318a018ca7c5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2079 struct sge_fl *fl, int len, int complete) 2079 struct sge_fl *fl, int len, int complete)
2080{ 2080{
2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2082 struct port_info *pi = netdev_priv(qs->netdev);
2082 struct sk_buff *skb = NULL; 2083 struct sk_buff *skb = NULL;
2083 struct cpl_rx_pkt *cpl; 2084 struct cpl_rx_pkt *cpl;
2084 struct skb_frag_struct *rx_frag; 2085 struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2116 2117
2117 if (!nr_frags) { 2118 if (!nr_frags) {
2118 offset = 2 + sizeof(struct cpl_rx_pkt); 2119 offset = 2 + sizeof(struct cpl_rx_pkt);
2119 qs->lro_va = sd->pg_chunk.va + 2; 2120 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2120 }
2121 len -= offset;
2122 2121
2123 prefetch(qs->lro_va); 2122 if ((pi->rx_offload & T3_RX_CSUM) &&
2123 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2124 skb->ip_summed = CHECKSUM_UNNECESSARY;
2125 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2126 } else
2127 skb->ip_summed = CHECKSUM_NONE;
2128 } else
2129 cpl = qs->lro_va;
2130
2131 len -= offset;
2124 2132
2125 rx_frag += nr_frags; 2133 rx_frag += nr_frags;
2126 rx_frag->page = sd->pg_chunk.page; 2134 rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2136 return; 2144 return;
2137 2145
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2146 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2140 cpl = qs->lro_va;
2141 2147
2142 if (unlikely(cpl->vlan_valid)) { 2148 if (unlikely(cpl->vlan_valid)) {
2143 struct net_device *dev = qs->netdev;
2144 struct port_info *pi = netdev_priv(dev);
2145 struct vlan_group *grp = pi->vlan_grp; 2149 struct vlan_group *grp = pi->vlan_grp;
2146 2150
2147 if (likely(grp != NULL)) { 2151 if (likely(grp != NULL)) {
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 34e03104c3c1..33c4fe26178c 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2711,6 +2711,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2711 SET_ETHTOOL_OPS(ndev, &ethtool_ops); 2711 SET_ETHTOOL_OPS(ndev, &ethtool_ops);
2712 netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); 2712 netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
2713 2713
2714 clk_enable(emac_clk);
2715
2714 /* register the network device */ 2716 /* register the network device */
2715 SET_NETDEV_DEV(ndev, &pdev->dev); 2717 SET_NETDEV_DEV(ndev, &pdev->dev);
2716 rc = register_netdev(ndev); 2718 rc = register_netdev(ndev);
@@ -2720,7 +2722,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2720 goto netdev_reg_err; 2722 goto netdev_reg_err;
2721 } 2723 }
2722 2724
2723 clk_enable(emac_clk);
2724 2725
2725 /* MII/Phy intialisation, mdio bus registration */ 2726 /* MII/Phy intialisation, mdio bus registration */
2726 emac_mii = mdiobus_alloc(); 2727 emac_mii = mdiobus_alloc();
@@ -2760,6 +2761,7 @@ mdiobus_quit:
2760 2761
2761netdev_reg_err: 2762netdev_reg_err:
2762mdio_alloc_err: 2763mdio_alloc_err:
2764 clk_disable(emac_clk);
2763no_irq_res: 2765no_irq_res:
2764 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2766 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2765 release_mem_region(res->start, res->end - res->start + 1); 2767 release_mem_region(res->start, res->end - res->start + 1);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2a567df3ea71..e8932db7ee77 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -326,6 +326,8 @@ struct e1000_adapter {
326 /* for ioport free */ 326 /* for ioport free */
327 int bars; 327 int bars;
328 int need_ioport; 328 int need_ioport;
329
330 bool discarding;
329}; 331};
330 332
331enum e1000_state_t { 333enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7e855f9bbd97..d29bb532eccf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1698 rctl &= ~E1000_RCTL_SZ_4096; 1698 rctl &= ~E1000_RCTL_SZ_4096;
1699 rctl |= E1000_RCTL_BSEX; 1699 rctl |= E1000_RCTL_BSEX;
1700 switch (adapter->rx_buffer_len) { 1700 switch (adapter->rx_buffer_len) {
1701 case E1000_RXBUFFER_256:
1702 rctl |= E1000_RCTL_SZ_256;
1703 rctl &= ~E1000_RCTL_BSEX;
1704 break;
1705 case E1000_RXBUFFER_512:
1706 rctl |= E1000_RCTL_SZ_512;
1707 rctl &= ~E1000_RCTL_BSEX;
1708 break;
1709 case E1000_RXBUFFER_1024:
1710 rctl |= E1000_RCTL_SZ_1024;
1711 rctl &= ~E1000_RCTL_BSEX;
1712 break;
1713 case E1000_RXBUFFER_2048: 1701 case E1000_RXBUFFER_2048:
1714 default: 1702 default:
1715 rctl |= E1000_RCTL_SZ_2048; 1703 rctl |= E1000_RCTL_SZ_2048;
@@ -2802,13 +2790,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2802dma_error: 2790dma_error:
2803 dev_err(&pdev->dev, "TX DMA map failed\n"); 2791 dev_err(&pdev->dev, "TX DMA map failed\n");
2804 buffer_info->dma = 0; 2792 buffer_info->dma = 0;
2805 count--; 2793 if (count)
2806
2807 while (count >= 0) {
2808 count--; 2794 count--;
2809 i--; 2795
2810 if (i < 0) 2796 while (count--) {
2797 if (i==0)
2811 i += tx_ring->count; 2798 i += tx_ring->count;
2799 i--;
2812 buffer_info = &tx_ring->buffer_info[i]; 2800 buffer_info = &tx_ring->buffer_info[i];
2813 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2801 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2814 } 2802 }
@@ -3176,13 +3164,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3176 * however with the new *_jumbo_rx* routines, jumbo receives will use 3164 * however with the new *_jumbo_rx* routines, jumbo receives will use
3177 * fragmented skbs */ 3165 * fragmented skbs */
3178 3166
3179 if (max_frame <= E1000_RXBUFFER_256) 3167 if (max_frame <= E1000_RXBUFFER_2048)
3180 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3181 else if (max_frame <= E1000_RXBUFFER_512)
3182 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3183 else if (max_frame <= E1000_RXBUFFER_1024)
3184 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3185 else if (max_frame <= E1000_RXBUFFER_2048)
3186 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3168 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3187 else 3169 else
3188#if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3170#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@@ -3850,13 +3832,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3850 3832
3851 length = le16_to_cpu(rx_desc->length); 3833 length = le16_to_cpu(rx_desc->length);
3852 /* !EOP means multiple descriptors were used to store a single 3834 /* !EOP means multiple descriptors were used to store a single
3853 * packet, also make sure the frame isn't just CRC only */ 3835 * packet, if that's the case we need to toss it. In fact, we
3854 if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { 3836 * need to toss every packet with the EOP bit clear and the next
3837 * frame that _does_ have the EOP bit set, as it is by
3838 * definition only a frame fragment
3839 */
3840 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
3841 adapter->discarding = true;
3842
3843 if (adapter->discarding) {
3855 /* All receives must fit into a single buffer */ 3844 /* All receives must fit into a single buffer */
3856 E1000_DBG("%s: Receive packet consumed multiple" 3845 E1000_DBG("%s: Receive packet consumed multiple"
3857 " buffers\n", netdev->name); 3846 " buffers\n", netdev->name);
3858 /* recycle */ 3847 /* recycle */
3859 buffer_info->skb = skb; 3848 buffer_info->skb = skb;
3849 if (status & E1000_RXD_STAT_EOP)
3850 adapter->discarding = false;
3860 goto next_desc; 3851 goto next_desc;
3861 } 3852 }
3862 3853
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b979464091bb..02d67d047d96 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
237 /* Set if manageability features are enabled. */ 237 /* Set if manageability features are enabled. */
238 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) 238 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
239 ? true : false; 239 ? true : false;
240 /* Adaptive IFS supported */
241 mac->adaptive_ifs = true;
240 242
241 /* check for link */ 243 /* check for link */
242 switch (hw->phy.media_type) { 244 switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cebbd9079d53..d236efaf7478 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
421/* CRC Stripping defines */ 421/* CRC Stripping defines */
422#define FLAG2_CRC_STRIPPING (1 << 0) 422#define FLAG2_CRC_STRIPPING (1 << 0)
423#define FLAG2_HAS_PHY_WAKEUP (1 << 1) 423#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
424#define FLAG2_IS_DISCARDING (1 << 2)
424 425
425#define E1000_RX_DESC_PS(R, i) \ 426#define E1000_RX_DESC_PS(R, i) \
426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 427 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -582,7 +583,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
582extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); 583extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
583extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, 584extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
584 u16 data); 585 u16 data);
585extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
586extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); 586extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
587extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); 587extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
588extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); 588extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 3028f23da891..e2aa3b788564 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
224 /* Set if manageability features are enabled. */ 224 /* Set if manageability features are enabled. */
225 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) 225 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
226 ? true : false; 226 ? true : false;
227 /* Adaptive IFS not supported */
228 mac->adaptive_ifs = false;
227 229
228 /* check for link */ 230 /* check for link */
229 switch (hw->phy.media_type) { 231 switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2784cf44a6f3..eccf29b75c41 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -818,6 +818,7 @@ struct e1000_mac_info {
818 818
819 u8 forced_speed_duplex; 819 u8 forced_speed_duplex;
820 820
821 bool adaptive_ifs;
821 bool arc_subsystem_valid; 822 bool arc_subsystem_valid;
822 bool autoneg; 823 bool autoneg;
823 bool autoneg_failed; 824 bool autoneg_failed;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9b09246af064..8b6ecd127889 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -138,6 +138,10 @@
138#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ 138#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
139#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ 139#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
140 140
141/* KMRN Mode Control */
142#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
143#define HV_KMRN_MDIO_SLOW 0x0400
144
141/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 145/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
142/* Offset 04h HSFSTS */ 146/* Offset 04h HSFSTS */
143union ich8_hws_flash_status { 147union ich8_hws_flash_status {
@@ -219,6 +223,7 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
219static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); 223static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
220static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); 224static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
221static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); 225static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
226static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
222 227
223static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 228static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
224{ 229{
@@ -270,7 +275,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
270 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 275 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
271 276
272 phy->id = e1000_phy_unknown; 277 phy->id = e1000_phy_unknown;
273 e1000e_get_phy_id(hw); 278 ret_val = e1000e_get_phy_id(hw);
279 if (ret_val)
280 goto out;
281 if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
282 /*
283 * In case the PHY needs to be in mdio slow mode (eg. 82577),
284 * set slow mode and try to get the PHY id again.
285 */
286 ret_val = e1000_set_mdio_slow_mode_hv(hw);
287 if (ret_val)
288 goto out;
289 ret_val = e1000e_get_phy_id(hw);
290 if (ret_val)
291 goto out;
292 }
274 phy->type = e1000e_get_phy_type_from_id(phy->id); 293 phy->type = e1000e_get_phy_type_from_id(phy->id);
275 294
276 switch (phy->type) { 295 switch (phy->type) {
@@ -292,6 +311,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
292 break; 311 break;
293 } 312 }
294 313
314out:
295 return ret_val; 315 return ret_val;
296} 316}
297 317
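The hunk above now checks the return value of e1000e_get_phy_id() and, when the ID reads back as all-zeros or all-ones, switches the PHY into slow MDIO mode and retries once (the 82577 without link needs this). A hedged user-space sketch of that probe-and-retry flow; set_mdio_slow_mode() and read_phy_id() are simulated stand-ins, and the ID constants are illustrative rather than the driver's defines:

#include <stdint.h>
#include <stdio.h>

#define PHY_ID_INVALID_LOW  0x0u
#define PHY_ID_INVALID_HIGH 0x0fffffffu     /* stands in for PHY_REVISION_MASK */

/* Simulated hardware: the PHY only answers once slow MDIO is selected. */
static int slow_mode;
static int set_mdio_slow_mode(void) { slow_mode = 1; return 0; }
static uint32_t read_phy_id(void)   { return slow_mode ? 0x01540050u : PHY_ID_INVALID_HIGH; }

static int probe_phy(uint32_t *id)
{
        *id = read_phy_id();
        if (*id != PHY_ID_INVALID_LOW && *id != PHY_ID_INVALID_HIGH)
                return 0;               /* sane ID on the first try */
        /* Bogus ID: switch to slow MDIO mode and try exactly once more. */
        if (set_mdio_slow_mode())
                return -1;
        *id = read_phy_id();
        return (*id != PHY_ID_INVALID_LOW && *id != PHY_ID_INVALID_HIGH) ? 0 : -1;
}

int main(void)
{
        uint32_t id;
        if (probe_phy(&id) == 0)
                printf("phy id 0x%08x\n", (unsigned)id);
        return 0;
}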
@@ -454,6 +474,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
454 mac->rar_entry_count--; 474 mac->rar_entry_count--;
455 /* Set if manageability features are enabled. */ 475 /* Set if manageability features are enabled. */
456 mac->arc_subsystem_valid = true; 476 mac->arc_subsystem_valid = true;
477 /* Adaptive IFS supported */
478 mac->adaptive_ifs = true;
457 479
458 /* LED operations */ 480 /* LED operations */
459 switch (mac->type) { 481 switch (mac->type) {
@@ -1074,16 +1096,44 @@ out:
1074 1096
1075 1097
1076/** 1098/**
1099 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1100 * @hw: pointer to the HW structure
1101 **/
1102static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1103{
1104 s32 ret_val;
1105 u16 data;
1106
1107 ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
1108 if (ret_val)
1109 return ret_val;
1110
1111 data |= HV_KMRN_MDIO_SLOW;
1112
1113 ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
1114
1115 return ret_val;
1116}
1117
1118/**
1077 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be 1119 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1078 * done after every PHY reset. 1120 * done after every PHY reset.
1079 **/ 1121 **/
1080static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) 1122static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1081{ 1123{
1082 s32 ret_val = 0; 1124 s32 ret_val = 0;
1125 u16 phy_data;
1083 1126
1084 if (hw->mac.type != e1000_pchlan) 1127 if (hw->mac.type != e1000_pchlan)
1085 return ret_val; 1128 return ret_val;
1086 1129
1130 /* Set MDIO slow mode before any other MDIO access */
1131 if (hw->phy.type == e1000_phy_82577) {
1132 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1133 if (ret_val)
1134 goto out;
1135 }
1136
1087 if (((hw->phy.type == e1000_phy_82577) && 1137 if (((hw->phy.type == e1000_phy_82577) &&
1088 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || 1138 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1089 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { 1139 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
@@ -1116,16 +1166,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1116 1166
1117 hw->phy.addr = 1; 1167 hw->phy.addr = 1;
1118 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); 1168 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1169 hw->phy.ops.release(hw);
1119 if (ret_val) 1170 if (ret_val)
1120 goto out; 1171 goto out;
1121 hw->phy.ops.release(hw);
1122 1172
1123 /* 1173 /*
1124 * Configure the K1 Si workaround during phy reset assuming there is 1174 * Configure the K1 Si workaround during phy reset assuming there is
1125 * link so that it disables K1 if link is in 1Gbps. 1175 * link so that it disables K1 if link is in 1Gbps.
1126 */ 1176 */
1127 ret_val = e1000_k1_gig_workaround_hv(hw, true); 1177 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1178 if (ret_val)
1179 goto out;
1128 1180
1181 /* Workaround for link disconnects on a busy hub in half duplex */
1182 ret_val = hw->phy.ops.acquire(hw);
1183 if (ret_val)
1184 goto out;
1185 ret_val = hw->phy.ops.read_reg_locked(hw,
1186 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1187 &phy_data);
1188 if (ret_val)
1189 goto release;
1190 ret_val = hw->phy.ops.write_reg_locked(hw,
1191 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1192 phy_data & 0x00FF);
1193release:
1194 hw->phy.ops.release(hw);
1129out: 1195out:
1130 return ret_val; 1196 return ret_val;
1131} 1197}
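The second half of the hunk adds the busy-hub/half-duplex workaround: acquire the PHY semaphore, read a port-control register, write it back with the upper byte cleared, and release through a dedicated label so every failure path still drops the semaphore (the same reasoning moved the earlier release() call ahead of the ret_val check). A small sketch of that acquire/release-label idiom with dummy accessors; none of these helpers are the driver's real ops:

#include <stdio.h>

static int  acquire(void)          { puts("acquire"); return 0; }
static void release(void)          { puts("release"); }
static int  read_reg(unsigned *v)  { *v = 0x1234; return 0; }
static int  write_reg(unsigned v)  { printf("write 0x%04x\n", v); return 0; }

/* Clear the upper byte of a register under the lock; any failure after the
 * acquire jumps to a single release label so the lock is always dropped. */
static int clear_upper_byte(void)
{
        unsigned val;
        int ret = acquire();
        if (ret)
                goto out;
        ret = read_reg(&val);
        if (ret)
                goto release;
        ret = write_reg(val & 0x00FF);
release:
        release();
out:
        return ret;
}

int main(void) { return clear_upper_byte(); }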
@@ -1182,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1182 /* Allow time for h/w to get to a quiescent state after reset */ 1248 /* Allow time for h/w to get to a quiescent state after reset */
1183 mdelay(10); 1249 mdelay(10);
1184 1250
1251 /* Perform any necessary post-reset workarounds */
1185 if (hw->mac.type == e1000_pchlan) { 1252 if (hw->mac.type == e1000_pchlan) {
1186 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 1253 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1187 if (ret_val) 1254 if (ret_val)
@@ -2482,6 +2549,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2482 if (!ret_val) 2549 if (!ret_val)
2483 e1000_release_swflag_ich8lan(hw); 2550 e1000_release_swflag_ich8lan(hw);
2484 2551
2552 /* Perform any necessary post-reset workarounds */
2553 if (hw->mac.type == e1000_pchlan)
2554 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2555
2485 if (ctrl & E1000_CTRL_PHY_RST) 2556 if (ctrl & E1000_CTRL_PHY_RST)
2486 ret_val = hw->phy.ops.get_cfg_done(hw); 2557 ret_val = hw->phy.ops.get_cfg_done(hw);
2487 2558
@@ -2526,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2526 kab |= E1000_KABGTXD_BGSQLBIAS; 2597 kab |= E1000_KABGTXD_BGSQLBIAS;
2527 ew32(KABGTXD, kab); 2598 ew32(KABGTXD, kab);
2528 2599
2529 if (hw->mac.type == e1000_pchlan)
2530 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2531
2532out: 2600out:
2533 return ret_val; 2601 return ret_val;
2534} 2602}
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a86c17548c1e..2fa9b36a2c5a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -125,6 +125,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
125void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) 125void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
126{ 126{
127 u32 i; 127 u32 i;
128 u8 mac_addr[ETH_ALEN] = {0};
128 129
129 /* Setup the receive address */ 130 /* Setup the receive address */
130 e_dbg("Programming MAC Address into RAR[0]\n"); 131 e_dbg("Programming MAC Address into RAR[0]\n");
@@ -133,12 +134,8 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
133 134
134 /* Zero out the other (rar_entry_count - 1) receive addresses */ 135 /* Zero out the other (rar_entry_count - 1) receive addresses */
135 e_dbg("Clearing RAR[1-%u]\n", rar_count-1); 136 e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
136 for (i = 1; i < rar_count; i++) { 137 for (i = 1; i < rar_count; i++)
137 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); 138 e1000e_rar_set(hw, mac_addr, i);
138 e1e_flush();
139 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
140 e1e_flush();
141 }
142} 139}
143 140
144/** 141/**
@@ -164,10 +161,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
164 161
165 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 162 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
166 163
167 rar_high |= E1000_RAH_AV; 164 /* If MAC address zero, no need to set the AV bit */
165 if (rar_low || rar_high)
166 rar_high |= E1000_RAH_AV;
168 167
169 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); 168 /*
170 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); 169 * Some bridges will combine consecutive 32-bit writes into
170 * a single burst write, which will malfunction on some parts.
171 * The flushes avoid this.
172 */
173 ew32(RAL(index), rar_low);
174 e1e_flush();
175 ew32(RAH(index), rar_high);
176 e1e_flush();
171} 177}
172 178
173/** 179/**
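e1000e_rar_set() now skips the address-valid bit for an all-zero entry and inserts a flush between the RAL and RAH writes so a bridge cannot fuse them into one burst write. A toy model of the same sequence; writel()/flush() here only mimic posted MMIO writes, and RAH_AV as bit 31 is used illustratively:

#include <stdint.h>
#include <stdio.h>

#define RAH_AV (1u << 31)       /* "address valid" bit, as in the RAH register */

/* Toy MMIO model: writel() stores, flush() performs a dummy read so the
 * previous write is posted before the next one (mimicking e1e_flush()). */
static uint32_t regs[2];
static void writel(uint32_t val, int idx) { regs[idx] = val; }
static uint32_t flush(void)               { return regs[0]; }

static void rar_set(const uint8_t addr[6])
{
        uint32_t lo = addr[0] | addr[1] << 8 | addr[2] << 16 | (uint32_t)addr[3] << 24;
        uint32_t hi = addr[4] | addr[5] << 8;

        /* A zeroed entry stays invalid; a real address gets the AV bit. */
        if (lo || hi)
                hi |= RAH_AV;

        /* Flush after each half so the two writes are never combined. */
        writel(lo, 0);
        flush();
        writel(hi, 1);
        flush();
}

int main(void)
{
        const uint8_t mac[6]  = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        const uint8_t zero[6] = { 0 };
        rar_set(mac);  printf("RAL=0x%08x RAH=0x%08x\n", (unsigned)regs[0], (unsigned)regs[1]);
        rar_set(zero); printf("RAL=0x%08x RAH=0x%08x\n", (unsigned)regs[0], (unsigned)regs[1]);
        return 0;
}

This also explains the e1000e_init_rx_addrs() change above: clearing RAR[1..n] is now done by calling the same helper with a zeroed MAC instead of poking the register array directly.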
@@ -1609,6 +1615,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
1609{ 1615{
1610 struct e1000_mac_info *mac = &hw->mac; 1616 struct e1000_mac_info *mac = &hw->mac;
1611 1617
1618 if (!mac->adaptive_ifs) {
1619 e_dbg("Not in Adaptive IFS mode!\n");
1620 goto out;
1621 }
1622
1612 mac->current_ifs_val = 0; 1623 mac->current_ifs_val = 0;
1613 mac->ifs_min_val = IFS_MIN; 1624 mac->ifs_min_val = IFS_MIN;
1614 mac->ifs_max_val = IFS_MAX; 1625 mac->ifs_max_val = IFS_MAX;
@@ -1617,6 +1628,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
1617 1628
1618 mac->in_ifs_mode = false; 1629 mac->in_ifs_mode = false;
1619 ew32(AIT, 0); 1630 ew32(AIT, 0);
1631out:
1632 return;
1620} 1633}
1621 1634
1622/** 1635/**
@@ -1630,6 +1643,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1630{ 1643{
1631 struct e1000_mac_info *mac = &hw->mac; 1644 struct e1000_mac_info *mac = &hw->mac;
1632 1645
1646 if (!mac->adaptive_ifs) {
1647 e_dbg("Not in Adaptive IFS mode!\n");
1648 goto out;
1649 }
1650
1633 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { 1651 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1634 if (mac->tx_packet_delta > MIN_NUM_XMITS) { 1652 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1635 mac->in_ifs_mode = true; 1653 mac->in_ifs_mode = true;
@@ -1650,6 +1668,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1650 ew32(AIT, 0); 1668 ew32(AIT, 0);
1651 } 1669 }
1652 } 1670 }
1671out:
1672 return;
1653} 1673}
1654 1674
1655/** 1675/**
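The mac->adaptive_ifs flag set per MAC family in the earlier hunks is what these lib.c guards test, so e1000e_reset_adaptive() and e1000e_update_adaptive() become no-ops (with a debug message) on parts such as 80003es2lan that do not support the feature. A compact sketch of the guard, using an early return in place of the driver's goto out; the struct is trimmed to the fields the sketch needs:

#include <stdbool.h>
#include <stdio.h>

struct mac_info {
        bool adaptive_ifs;          /* set by the family-specific init code */
        unsigned current_ifs_val;
        bool in_ifs_mode;
};

static void reset_adaptive(struct mac_info *mac)
{
        if (!mac->adaptive_ifs) {
                puts("Not in Adaptive IFS mode!");
                return;             /* unsupported parts skip the feature */
        }
        mac->current_ifs_val = 0;
        mac->in_ifs_mode = false;
        puts("adaptive IFS state cleared");
}

int main(void)
{
        struct mac_info es2lan = { .adaptive_ifs = false };     /* not supported */
        struct mac_info i82571 = { .adaptive_ifs = true };      /* supported */
        reset_adaptive(&es2lan);
        reset_adaptive(&i82571);
        return 0;
}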
@@ -2287,10 +2307,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2287 s32 ret_val, hdr_csum, csum; 2307 s32 ret_val, hdr_csum, csum;
2288 u8 i, len; 2308 u8 i, len;
2289 2309
2310 hw->mac.tx_pkt_filtering = true;
2311
2290 /* No manageability, no filtering */ 2312 /* No manageability, no filtering */
2291 if (!e1000e_check_mng_mode(hw)) { 2313 if (!e1000e_check_mng_mode(hw)) {
2292 hw->mac.tx_pkt_filtering = false; 2314 hw->mac.tx_pkt_filtering = false;
2293 return 0; 2315 goto out;
2294 } 2316 }
2295 2317
2296 /* 2318 /*
@@ -2298,9 +2320,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2298 * reason, disable filtering. 2320 * reason, disable filtering.
2299 */ 2321 */
2300 ret_val = e1000_mng_enable_host_if(hw); 2322 ret_val = e1000_mng_enable_host_if(hw);
2301 if (ret_val != 0) { 2323 if (ret_val) {
2302 hw->mac.tx_pkt_filtering = false; 2324 hw->mac.tx_pkt_filtering = false;
2303 return ret_val; 2325 goto out;
2304 } 2326 }
2305 2327
2306 /* Read in the header. Length and offset are in dwords. */ 2328 /* Read in the header. Length and offset are in dwords. */
@@ -2319,17 +2341,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2319 */ 2341 */
2320 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { 2342 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2321 hw->mac.tx_pkt_filtering = true; 2343 hw->mac.tx_pkt_filtering = true;
2322 return 1; 2344 goto out;
2323 } 2345 }
2324 2346
2325 /* Cookie area is valid, make the final check for filtering. */ 2347 /* Cookie area is valid, make the final check for filtering. */
2326 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { 2348 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2327 hw->mac.tx_pkt_filtering = false; 2349 hw->mac.tx_pkt_filtering = false;
2328 return 0; 2350 goto out;
2329 } 2351 }
2330 2352
2331 hw->mac.tx_pkt_filtering = true; 2353out:
2332 return 1; 2354 return hw->mac.tx_pkt_filtering;
2333} 2355}
2334 2356
2335/** 2357/**
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 762b697ce731..57f149b75fbe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
450 450
451 length = le16_to_cpu(rx_desc->length); 451 length = le16_to_cpu(rx_desc->length);
452 452
453 /* !EOP means multiple descriptors were used to store a single 453 /*
454 * packet, also make sure the frame isn't just CRC only */ 454 * !EOP means multiple descriptors were used to store a single
455 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 455 * packet, if that's the case we need to toss it. In fact, we
456 * need to toss every packet with the EOP bit clear and the
457 * next frame that _does_ have the EOP bit set, as it is by
458 * definition only a frame fragment
459 */
460 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
461 adapter->flags2 |= FLAG2_IS_DISCARDING;
462
463 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
456 /* All receives must fit into a single buffer */ 464 /* All receives must fit into a single buffer */
457 e_dbg("Receive packet consumed multiple buffers\n"); 465 e_dbg("Receive packet consumed multiple buffers\n");
458 /* recycle */ 466 /* recycle */
459 buffer_info->skb = skb; 467 buffer_info->skb = skb;
468 if (status & E1000_RXD_STAT_EOP)
469 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
460 goto next_desc; 470 goto next_desc;
461 } 471 }
462 472
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
745 PCI_DMA_FROMDEVICE); 755 PCI_DMA_FROMDEVICE);
746 buffer_info->dma = 0; 756 buffer_info->dma = 0;
747 757
748 if (!(staterr & E1000_RXD_STAT_EOP)) { 758 /* see !EOP comment in other rx routine */
759 if (!(staterr & E1000_RXD_STAT_EOP))
760 adapter->flags2 |= FLAG2_IS_DISCARDING;
761
762 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
749 e_dbg("Packet Split buffers didn't pick up the full " 763 e_dbg("Packet Split buffers didn't pick up the full "
750 "packet\n"); 764 "packet\n");
751 dev_kfree_skb_irq(skb); 765 dev_kfree_skb_irq(skb);
766 if (staterr & E1000_RXD_STAT_EOP)
767 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
752 goto next_desc; 768 goto next_desc;
753 } 769 }
754 770
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1118 1134
1119 rx_ring->next_to_clean = 0; 1135 rx_ring->next_to_clean = 0;
1120 rx_ring->next_to_use = 0; 1136 rx_ring->next_to_use = 0;
1137 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1121 1138
1122 writel(0, adapter->hw.hw_addr + rx_ring->head); 1139 writel(0, adapter->hw.hw_addr + rx_ring->head);
1123 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1140 writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2333 rctl &= ~E1000_RCTL_SZ_4096; 2350 rctl &= ~E1000_RCTL_SZ_4096;
2334 rctl |= E1000_RCTL_BSEX; 2351 rctl |= E1000_RCTL_BSEX;
2335 switch (adapter->rx_buffer_len) { 2352 switch (adapter->rx_buffer_len) {
2336 case 256:
2337 rctl |= E1000_RCTL_SZ_256;
2338 rctl &= ~E1000_RCTL_BSEX;
2339 break;
2340 case 512:
2341 rctl |= E1000_RCTL_SZ_512;
2342 rctl &= ~E1000_RCTL_BSEX;
2343 break;
2344 case 1024:
2345 rctl |= E1000_RCTL_SZ_1024;
2346 rctl &= ~E1000_RCTL_BSEX;
2347 break;
2348 case 2048: 2353 case 2048:
2349 default: 2354 default:
2350 rctl |= E1000_RCTL_SZ_2048; 2355 rctl |= E1000_RCTL_SZ_2048;
@@ -3315,24 +3320,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3315 if ((hw->phy.type == e1000_phy_82578) || 3320 if ((hw->phy.type == e1000_phy_82578) ||
3316 (hw->phy.type == e1000_phy_82577)) { 3321 (hw->phy.type == e1000_phy_82577)) {
3317 e1e_rphy(hw, HV_SCC_UPPER, &phy_data); 3322 e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
3318 e1e_rphy(hw, HV_SCC_LOWER, &phy_data); 3323 if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
3319 adapter->stats.scc += phy_data; 3324 adapter->stats.scc += phy_data;
3320 3325
3321 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); 3326 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
3322 e1e_rphy(hw, HV_ECOL_LOWER, &phy_data); 3327 if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
3323 adapter->stats.ecol += phy_data; 3328 adapter->stats.ecol += phy_data;
3324 3329
3325 e1e_rphy(hw, HV_MCC_UPPER, &phy_data); 3330 e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
3326 e1e_rphy(hw, HV_MCC_LOWER, &phy_data); 3331 if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
3327 adapter->stats.mcc += phy_data; 3332 adapter->stats.mcc += phy_data;
3328 3333
3329 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); 3334 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
3330 e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data); 3335 if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
3331 adapter->stats.latecol += phy_data; 3336 adapter->stats.latecol += phy_data;
3332 3337
3333 e1e_rphy(hw, HV_DC_UPPER, &phy_data); 3338 e1e_rphy(hw, HV_DC_UPPER, &phy_data);
3334 e1e_rphy(hw, HV_DC_LOWER, &phy_data); 3339 if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
3335 adapter->stats.dc += phy_data; 3340 adapter->stats.dc += phy_data;
3336 } else { 3341 } else {
3337 adapter->stats.scc += er32(SCC); 3342 adapter->stats.scc += er32(SCC);
3338 adapter->stats.ecol += er32(ECOL); 3343 adapter->stats.ecol += er32(ECOL);
@@ -3360,8 +3365,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3360 if ((hw->phy.type == e1000_phy_82578) || 3365 if ((hw->phy.type == e1000_phy_82578) ||
3361 (hw->phy.type == e1000_phy_82577)) { 3366 (hw->phy.type == e1000_phy_82577)) {
3362 e1e_rphy(hw, HV_COLC_UPPER, &phy_data); 3367 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3363 e1e_rphy(hw, HV_COLC_LOWER, &phy_data); 3368 if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
3364 hw->mac.collision_delta = phy_data; 3369 hw->mac.collision_delta = phy_data;
3365 } else { 3370 } else {
3366 hw->mac.collision_delta = er32(COLC); 3371 hw->mac.collision_delta = er32(COLC);
3367 } 3372 }
@@ -3372,8 +3377,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3372 if ((hw->phy.type == e1000_phy_82578) || 3377 if ((hw->phy.type == e1000_phy_82578) ||
3373 (hw->phy.type == e1000_phy_82577)) { 3378 (hw->phy.type == e1000_phy_82577)) {
3374 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); 3379 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3375 e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data); 3380 if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
3376 adapter->stats.tncrs += phy_data; 3381 adapter->stats.tncrs += phy_data;
3377 } else { 3382 } else {
3378 if ((hw->mac.type != e1000_82574) && 3383 if ((hw->mac.type != e1000_82574) &&
3379 (hw->mac.type != e1000_82583)) 3384 (hw->mac.type != e1000_82583))
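Each PHY counter update above now only accumulates when the low-word read succeeds; the upper-word read is still issued to latch the counter but its value is discarded, and a failed MDIO read no longer adds a stale phy_data value to the running total. A toy sketch with a stand-in phy_read() and arbitrary register numbers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical PHY read: returns 0 on success and fills *val. */
static int phy_read(int reg, uint16_t *val)
{
        if (reg < 0)
                return -1;          /* simulate an MDIO failure */
        *val = 7;
        return 0;
}

int main(void)
{
        uint64_t single_collisions = 0;
        uint16_t v;

        /* Upper half read only to latch/clear the counter; value unused. */
        phy_read(0x10, &v);
        if (!phy_read(0x11, &v))    /* accumulate only on a good read */
                single_collisions += v;

        /* A failing read leaves the counter untouched instead of adding
         * whatever happened to be left in v. */
        phy_read(0x10, &v);
        if (!phy_read(-1, &v))
                single_collisions += v;

        printf("scc = %llu\n", (unsigned long long)single_collisions);
        return 0;
}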
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
3781 0, IPPROTO_TCP, 0); 3786 0, IPPROTO_TCP, 0);
3782 cmd_length = E1000_TXD_CMD_IP; 3787 cmd_length = E1000_TXD_CMD_IP;
3783 ipcse = skb_transport_offset(skb) - 1; 3788 ipcse = skb_transport_offset(skb) - 1;
3784 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3789 } else if (skb_is_gso_v6(skb)) {
3785 ipv6_hdr(skb)->payload_len = 0; 3790 ipv6_hdr(skb)->payload_len = 0;
3786 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3791 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3787 &ipv6_hdr(skb)->daddr, 3792 &ipv6_hdr(skb)->daddr,
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3962dma_error: 3967dma_error:
3963 dev_err(&pdev->dev, "TX DMA map failed\n"); 3968 dev_err(&pdev->dev, "TX DMA map failed\n");
3964 buffer_info->dma = 0; 3969 buffer_info->dma = 0;
3965 count--; 3970 if (count)
3966
3967 while (count >= 0) {
3968 count--; 3971 count--;
3969 i--; 3972
3970 if (i < 0) 3973 while (count--) {
3974 if (i == 0)
3971 i += tx_ring->count; 3975 i += tx_ring->count;
3976 i--;
3972 buffer_info = &tx_ring->buffer_info[i]; 3977 buffer_info = &tx_ring->buffer_info[i];
3973 e1000_put_txbuf(adapter, buffer_info); 3978 e1000_put_txbuf(adapter, buffer_info);
3974 } 3979 }
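The rewritten dma_error unwind avoids the old signed count/index dance: drop one from count for the entry that just failed, then walk backwards count times, wrapping at the start of the ring. A standalone model of that loop; put_txbuf() and RING_SIZE are illustrative stand-ins:

#include <stdio.h>

#define RING_SIZE 8

/* Hypothetical per-descriptor cleanup standing in for e1000_put_txbuf(). */
static void put_txbuf(int idx) { printf("unmap buffer %d\n", idx); }

/* Undo the 'count' most recent mappings, ending at ring slot 'i' (the slot
 * where the failure happened).  The failed slot itself was already cleaned
 * up, so it is skipped by the initial count--. */
static void unwind(int i, int count)
{
        if (count)
                count--;
        while (count--) {
                if (i == 0)
                        i += RING_SIZE;
                i--;
                put_txbuf(i);
        }
}

int main(void)
{
        /* 3 buffers mapped, failure while mapping into slot 1 after a wrap:
         * slots 0 and 7 get cleaned, slot 1 is skipped. */
        unwind(1, 3);
        return 0;
}

The igbvf and ixgb hunks further down switch to the same loop.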
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4317 * fragmented skbs 4322 * fragmented skbs
4318 */ 4323 */
4319 4324
4320 if (max_frame <= 256) 4325 if (max_frame <= 2048)
4321 adapter->rx_buffer_len = 256;
4322 else if (max_frame <= 512)
4323 adapter->rx_buffer_len = 512;
4324 else if (max_frame <= 1024)
4325 adapter->rx_buffer_len = 1024;
4326 else if (max_frame <= 2048)
4327 adapter->rx_buffer_len = 2048; 4326 adapter->rx_buffer_len = 2048;
4328 else 4327 else
4329 adapter->rx_buffer_len = 4096; 4328 adapter->rx_buffer_len = 4096;
@@ -4674,6 +4673,7 @@ static int e1000_resume(struct pci_dev *pdev)
4674 4673
4675 pci_set_power_state(pdev, PCI_D0); 4674 pci_set_power_state(pdev, PCI_D0);
4676 pci_restore_state(pdev); 4675 pci_restore_state(pdev);
4676 pci_save_state(pdev);
4677 e1000e_disable_l1aspm(pdev); 4677 e1000e_disable_l1aspm(pdev);
4678 4678
4679 err = pci_enable_device_mem(pdev); 4679 err = pci_enable_device_mem(pdev);
@@ -4825,6 +4825,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4825 } else { 4825 } else {
4826 pci_set_master(pdev); 4826 pci_set_master(pdev);
4827 pci_restore_state(pdev); 4827 pci_restore_state(pdev);
4828 pci_save_state(pdev);
4828 4829
4829 pci_enable_wake(pdev, PCI_D3hot, 0); 4830 pci_enable_wake(pdev, PCI_D3hot, 0);
4830 pci_enable_wake(pdev, PCI_D3cold, 0); 4831 pci_enable_wake(pdev, PCI_D3cold, 0);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 55a2c0acfee7..7f3ceb9dad6a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -152,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
152 if (phy->id != 0 && phy->id != PHY_REVISION_MASK) 152 if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
153 goto out; 153 goto out;
154 154
155 /*
156 * If the PHY ID is still unknown, we may have an 82577
157 * without link. We will try again after setting Slow MDIC
158 * mode. No harm in trying again in this case since the PHY
159 * ID is unknown at this point anyway.
160 */
161 ret_val = phy->ops.acquire(hw);
162 if (ret_val)
163 goto out;
164 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
165 if (ret_val)
166 goto out;
167 phy->ops.release(hw);
168
169 retry_count++; 155 retry_count++;
170 } 156 }
171out: 157out:
172 /* Revert to MDIO fast mode, if applicable */
173 if (retry_count) {
174 ret_val = phy->ops.acquire(hw);
175 if (ret_val)
176 return ret_val;
177 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
178 phy->ops.release(hw);
179 }
180
181 return ret_val; 158 return ret_val;
182} 159}
183 160
@@ -2791,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
2791} 2768}
2792 2769
2793/** 2770/**
2794 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2795 * @hw: pointer to the HW structure
2796 * @slow: true for slow mode, false for normal mode
2797 *
2798 * Assumes semaphore already acquired.
2799 **/
2800s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
2801{
2802 s32 ret_val = 0;
2803 u16 data = 0;
2804
2805 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
2806 hw->phy.addr = 1;
2807 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2808 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
2809 if (ret_val)
2810 goto out;
2811
2812 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
2813 (0x2180 | (slow << 10)));
2814 if (ret_val)
2815 goto out;
2816
2817 /* dummy read when reverting to fast mode - throw away result */
2818 if (!slow)
2819 ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
2820
2821out:
2822 return ret_val;
2823}
2824
2825/**
2826 * __e1000_read_phy_reg_hv - Read HV PHY register 2771 * __e1000_read_phy_reg_hv - Read HV PHY register
2827 * @hw: pointer to the HW structure 2772 * @hw: pointer to the HW structure
2828 * @offset: register offset to be read 2773 * @offset: register offset to be read
@@ -2839,7 +2784,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2839 s32 ret_val; 2784 s32 ret_val;
2840 u16 page = BM_PHY_REG_PAGE(offset); 2785 u16 page = BM_PHY_REG_PAGE(offset);
2841 u16 reg = BM_PHY_REG_NUM(offset); 2786 u16 reg = BM_PHY_REG_NUM(offset);
2842 bool in_slow_mode = false;
2843 2787
2844 if (!locked) { 2788 if (!locked) {
2845 ret_val = hw->phy.ops.acquire(hw); 2789 ret_val = hw->phy.ops.acquire(hw);
@@ -2847,16 +2791,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2847 return ret_val; 2791 return ret_val;
2848 } 2792 }
2849 2793
2850 /* Workaround failure in MDIO access while cable is disconnected */
2851 if ((hw->phy.type == e1000_phy_82577) &&
2852 !(er32(STATUS) & E1000_STATUS_LU)) {
2853 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
2854 if (ret_val)
2855 goto out;
2856
2857 in_slow_mode = true;
2858 }
2859
2860 /* Page 800 works differently than the rest so it has its own func */ 2794 /* Page 800 works differently than the rest so it has its own func */
2861 if (page == BM_WUC_PAGE) { 2795 if (page == BM_WUC_PAGE) {
2862 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, 2796 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -2893,10 +2827,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2893 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2827 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2894 data); 2828 data);
2895out: 2829out:
2896 /* Revert to MDIO fast mode, if applicable */
2897 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2898 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
2899
2900 if (!locked) 2830 if (!locked)
2901 hw->phy.ops.release(hw); 2831 hw->phy.ops.release(hw);
2902 2832
@@ -2948,7 +2878,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2948 s32 ret_val; 2878 s32 ret_val;
2949 u16 page = BM_PHY_REG_PAGE(offset); 2879 u16 page = BM_PHY_REG_PAGE(offset);
2950 u16 reg = BM_PHY_REG_NUM(offset); 2880 u16 reg = BM_PHY_REG_NUM(offset);
2951 bool in_slow_mode = false;
2952 2881
2953 if (!locked) { 2882 if (!locked) {
2954 ret_val = hw->phy.ops.acquire(hw); 2883 ret_val = hw->phy.ops.acquire(hw);
@@ -2956,16 +2885,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2956 return ret_val; 2885 return ret_val;
2957 } 2886 }
2958 2887
2959 /* Workaround failure in MDIO access while cable is disconnected */
2960 if ((hw->phy.type == e1000_phy_82577) &&
2961 !(er32(STATUS) & E1000_STATUS_LU)) {
2962 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
2963 if (ret_val)
2964 goto out;
2965
2966 in_slow_mode = true;
2967 }
2968
2969 /* Page 800 works differently than the rest so it has its own func */ 2888 /* Page 800 works differently than the rest so it has its own func */
2970 if (page == BM_WUC_PAGE) { 2889 if (page == BM_WUC_PAGE) {
2971 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, 2890 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3019,10 +2938,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
3019 data); 2938 data);
3020 2939
3021out: 2940out:
3022 /* Revert to MDIO fast mode, if applicable */
3023 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
3024 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
3025
3026 if (!locked) 2941 if (!locked)
3027 hw->phy.ops.release(hw); 2942 hw->phy.ops.release(hw);
3028 2943
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 25fabb3eedc5..d5160edf2fcf 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -46,6 +46,11 @@
46#include "gianfar.h" 46#include "gianfar.h"
47#include "fsl_pq_mdio.h" 47#include "fsl_pq_mdio.h"
48 48
49struct fsl_pq_mdio_priv {
50 void __iomem *map;
51 struct fsl_pq_mdio __iomem *regs;
52};
53
49/* 54/*
50 * Write value to the PHY at mii_id at register regnum, 55 * Write value to the PHY at mii_id at register regnum,
51 * on the bus attached to the local interface, which may be different from the 56 * on the bus attached to the local interface, which may be different from the
@@ -105,7 +110,9 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
105 110
106static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus) 111static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
107{ 112{
108 return (void __iomem __force *)bus->priv; 113 struct fsl_pq_mdio_priv *priv = bus->priv;
114
115 return priv->regs;
109} 116}
110 117
111/* 118/*
@@ -266,6 +273,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
266{ 273{
267 struct device_node *np = ofdev->node; 274 struct device_node *np = ofdev->node;
268 struct device_node *tbi; 275 struct device_node *tbi;
276 struct fsl_pq_mdio_priv *priv;
269 struct fsl_pq_mdio __iomem *regs = NULL; 277 struct fsl_pq_mdio __iomem *regs = NULL;
270 void __iomem *map; 278 void __iomem *map;
271 u32 __iomem *tbipa; 279 u32 __iomem *tbipa;
@@ -274,14 +282,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
274 u64 addr = 0, size = 0; 282 u64 addr = 0, size = 0;
275 int err = 0; 283 int err = 0;
276 284
285 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
286 if (!priv)
287 return -ENOMEM;
288
277 new_bus = mdiobus_alloc(); 289 new_bus = mdiobus_alloc();
278 if (NULL == new_bus) 290 if (NULL == new_bus)
279 return -ENOMEM; 291 goto err_free_priv;
280 292
281 new_bus->name = "Freescale PowerQUICC MII Bus", 293 new_bus->name = "Freescale PowerQUICC MII Bus",
282 new_bus->read = &fsl_pq_mdio_read, 294 new_bus->read = &fsl_pq_mdio_read,
283 new_bus->write = &fsl_pq_mdio_write, 295 new_bus->write = &fsl_pq_mdio_write,
284 new_bus->reset = &fsl_pq_mdio_reset, 296 new_bus->reset = &fsl_pq_mdio_reset,
297 new_bus->priv = priv;
285 fsl_pq_mdio_bus_name(new_bus->id, np); 298 fsl_pq_mdio_bus_name(new_bus->id, np);
286 299
287 /* Set the PHY base address */ 300 /* Set the PHY base address */
@@ -291,6 +304,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
291 err = -ENOMEM; 304 err = -ENOMEM;
292 goto err_free_bus; 305 goto err_free_bus;
293 } 306 }
307 priv->map = map;
294 308
295 if (of_device_is_compatible(np, "fsl,gianfar-mdio") || 309 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
296 of_device_is_compatible(np, "fsl,gianfar-tbi") || 310 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
@@ -298,8 +312,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
298 of_device_is_compatible(np, "ucc_geth_phy")) 312 of_device_is_compatible(np, "ucc_geth_phy"))
299 map -= offsetof(struct fsl_pq_mdio, miimcfg); 313 map -= offsetof(struct fsl_pq_mdio, miimcfg);
300 regs = map; 314 regs = map;
301 315 priv->regs = regs;
302 new_bus->priv = (void __force *)regs;
303 316
304 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); 317 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
305 318
@@ -392,10 +405,11 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
392err_free_irqs: 405err_free_irqs:
393 kfree(new_bus->irq); 406 kfree(new_bus->irq);
394err_unmap_regs: 407err_unmap_regs:
395 iounmap(regs); 408 iounmap(priv->map);
396err_free_bus: 409err_free_bus:
397 kfree(new_bus); 410 kfree(new_bus);
398 411err_free_priv:
412 kfree(priv);
399 return err; 413 return err;
400} 414}
401 415
@@ -404,14 +418,16 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
404{ 418{
405 struct device *device = &ofdev->dev; 419 struct device *device = &ofdev->dev;
406 struct mii_bus *bus = dev_get_drvdata(device); 420 struct mii_bus *bus = dev_get_drvdata(device);
421 struct fsl_pq_mdio_priv *priv = bus->priv;
407 422
408 mdiobus_unregister(bus); 423 mdiobus_unregister(bus);
409 424
410 dev_set_drvdata(device, NULL); 425 dev_set_drvdata(device, NULL);
411 426
412 iounmap(fsl_pq_mdio_get_regs(bus)); 427 iounmap(priv->map);
413 bus->priv = NULL; 428 bus->priv = NULL;
414 mdiobus_free(bus); 429 mdiobus_free(bus);
430 kfree(priv);
415 431
416 return 0; 432 return 0;
417} 433}
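Instead of force-casting the (possibly offset) register pointer into bus->priv, the probe now allocates a small private struct holding both the original ioremap() cookie and the adjusted register view, so the remove and error paths can iounmap the right address and free the struct. A user-space analogue with malloc standing in for ioremap and trimmed-down types:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the register block and MDIO bus types. */
struct fake_regs { int miimcfg; };

struct mdio_priv {
        void *map;                  /* what was actually mapped (to unmap) */
        struct fake_regs *regs;     /* possibly offset view used for access */
};

struct mii_bus { void *priv; };

static struct fake_regs *get_regs(struct mii_bus *bus)
{
        struct mdio_priv *priv = bus->priv;
        return priv->regs;          /* no more casting bus->priv directly */
}

int main(void)
{
        struct mii_bus bus;
        struct mdio_priv *priv = calloc(1, sizeof(*priv));
        char *mapping = malloc(64);     /* pretend ioremap() result */

        priv->map = mapping;
        priv->regs = (struct fake_regs *)(mapping + 16);    /* offset view */
        bus.priv = priv;

        printf("regs at %p (map at %p)\n", (void *)get_regs(&bus), priv->map);

        /* Teardown mirrors fsl_pq_mdio_remove(): unmap the original cookie,
         * then free the private struct. */
        free(priv->map);
        free(priv);
        return 0;
}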
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e0620d084644..8bd3c9f17532 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
143static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
147 146
148MODULE_AUTHOR("Freescale Semiconductor, Inc"); 147MODULE_AUTHOR("Freescale Semiconductor, Inc");
149MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 148MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = {
455 .ndo_set_multicast_list = gfar_set_multi, 454 .ndo_set_multicast_list = gfar_set_multi,
456 .ndo_tx_timeout = gfar_timeout, 455 .ndo_tx_timeout = gfar_timeout,
457 .ndo_do_ioctl = gfar_ioctl, 456 .ndo_do_ioctl = gfar_ioctl,
458 .ndo_select_queue = gfar_select_queue,
459 .ndo_get_stats = gfar_get_stats, 457 .ndo_get_stats = gfar_get_stats,
460 .ndo_vlan_rx_register = gfar_vlan_rx_register, 458 .ndo_vlan_rx_register = gfar_vlan_rx_register,
461 .ndo_set_mac_address = eth_mac_addr, 459 .ndo_set_mac_address = eth_mac_addr,
@@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
506 return priv->vlgrp || priv->rx_csum_enable; 504 return priv->vlgrp || priv->rx_csum_enable;
507} 505}
508 506
509u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
510{
511 return skb_get_queue_mapping(skb);
512}
513static void free_tx_pointers(struct gfar_private *priv) 507static void free_tx_pointers(struct gfar_private *priv)
514{ 508{
515 int i = 0; 509 int i = 0;
@@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2470 fcb = (struct rxfcb *)skb->data; 2464 fcb = (struct rxfcb *)skb->data;
2471 2465
2472 /* Remove the FCB from the skb */ 2466 /* Remove the FCB from the skb */
2473 skb_set_queue_mapping(skb, fcb->rq);
2474 /* Remove the padded bytes, if there are any */ 2467 /* Remove the padded bytes, if there are any */
2475 if (amount_pull) 2468 if (amount_pull) {
2469 skb_record_rx_queue(skb, fcb->rq);
2476 skb_pull(skb, amount_pull); 2470 skb_pull(skb, amount_pull);
2471 }
2477 2472
2478 if (priv->rx_csum_enable) 2473 if (priv->rx_csum_enable)
2479 gfar_rx_checksum(skb, fcb); 2474 gfar_rx_checksum(skb, fcb);
@@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2554 /* Remove the FCS from the packet length */ 2549 /* Remove the FCS from the packet length */
2555 skb_put(skb, pkt_len); 2550 skb_put(skb, pkt_len);
2556 rx_queue->stats.rx_bytes += pkt_len; 2551 rx_queue->stats.rx_bytes += pkt_len;
2557 2552 skb_record_rx_queue(skb, rx_queue->qindex);
2558 gfar_process_frame(dev, skb, amount_pull); 2553 gfar_process_frame(dev, skb, amount_pull);
2559 2554
2560 } else { 2555 } else {
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ae5f11c8fc13..bdadf3e23c94 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -248,6 +248,7 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
248{ 248{
249 unsigned char *ptr; 249 unsigned char *ptr;
250 struct bpqdev *bpq; 250 struct bpqdev *bpq;
251 struct net_device *orig_dev;
251 int size; 252 int size;
252 253
253 /* 254 /*
@@ -282,8 +283,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
282 283
283 bpq = netdev_priv(dev); 284 bpq = netdev_priv(dev);
284 285
286 orig_dev = dev;
285 if ((dev = bpq_get_ether_dev(dev)) == NULL) { 287 if ((dev = bpq_get_ether_dev(dev)) == NULL) {
286 dev->stats.tx_dropped++; 288 orig_dev->stats.tx_dropped++;
287 kfree_skb(skb); 289 kfree_skb(skb);
288 return NETDEV_TX_OK; 290 return NETDEV_TX_OK;
289 } 291 }
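The bpq_xmit() change is a NULL-dereference fix: once dev is reassigned to the (possibly missing) underlying Ethernet device, the drop counter must be bumped on the saved original device rather than on the pointer that just came back NULL. A tiny reproduction of the pattern with a stand-in lookup that always fails:

#include <stdio.h>
#include <stddef.h>

struct net_device { const char *name; unsigned long tx_dropped; };

/* Hypothetical lookup that may fail, like bpq_get_ether_dev(). */
static struct net_device *lookup_ether_dev(struct net_device *dev)
{
        (void)dev;
        return NULL;                /* simulate "no underlying device" */
}

static int xmit(struct net_device *dev)
{
        struct net_device *orig_dev = dev;      /* keep the caller's device */

        dev = lookup_ether_dev(dev);
        if (dev == NULL) {
                /* Account the drop on the device we still hold a valid
                 * pointer to; the old code dereferenced the NULL 'dev'. */
                orig_dev->tx_dropped++;
                return 0;
        }
        return 0;
}

int main(void)
{
        struct net_device bpq0 = { .name = "bpq0", .tx_dropped = 0 };
        xmit(&bpq0);
        printf("%s tx_dropped=%lu\n", bpq0.name, bpq0.tx_dropped);
        return 0;
}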
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 090a6d3af112..052c74091d91 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -87,6 +87,7 @@ History:
87#include <linux/module.h> 87#include <linux/module.h>
88#include <linux/netdevice.h> 88#include <linux/netdevice.h>
89#include <linux/etherdevice.h> 89#include <linux/etherdevice.h>
90#include <linux/if_ether.h>
90#include <linux/skbuff.h> 91#include <linux/skbuff.h>
91#include <linux/bitops.h> 92#include <linux/bitops.h>
92 93
@@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
988 989
989 /* copy out MAC address */ 990 /* copy out MAC address */
990 991
991 for (z = 0; z < sizeof(dev->dev_addr); z++) 992 for (z = 0; z < ETH_ALEN; z++)
992 dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z); 993 dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
993 994
994 /* print config */ 995 /* print config */
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e8e9e9194a88..c505b50d1fa3 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1096 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1096 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1097 } else { 1097 } else {
1098 /* Set PCS register for forced link */ 1098 /* Set PCS register for forced link */
1099 reg |= E1000_PCS_LCTL_FSD | /* Force Speed */ 1099 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1100 E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
1101 E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
1102 1100
1103 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1101 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1104 } 1102 }
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 5c9d73e9bb8d..3670a66401b8 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
457 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; 457 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
458 458
459 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); 459 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
460 if (ret_val)
461 goto out;
462
463 /* Set number of link attempts before downshift */
464 ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
465 if (ret_val)
466 goto out;
467 phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
468 ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
469 460
470out: 461out:
471 return ret_val; 462 return ret_val;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ac9d5272650d..f771a6c08777 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
1795 /* dual port cards only support WoL on port A from now on 1795 /* dual port cards only support WoL on port A from now on
1796 * unless it was enabled in the eeprom for port B 1796 * unless it was enabled in the eeprom for port B
1797 * so exclude FUNC_1 ports from having WoL enabled */ 1797 * so exclude FUNC_1 ports from having WoL enabled */
1798 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 && 1798 if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
1799 !adapter->eeprom_wol) { 1799 !adapter->eeprom_wol) {
1800 wol->supported = 0; 1800 wol->supported = 0;
1801 break; 1801 break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78963a0e128d..c881347cb26d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
422 if (tx_queue > IGB_N0_QUEUE) 422 if (tx_queue > IGB_N0_QUEUE)
423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
424 if (!adapter->msix_entries && msix_vector == 0)
425 msixbm |= E1000_EIMS_OTHER;
424 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 426 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
425 q_vector->eims_value = msixbm; 427 q_vector->eims_value = msixbm;
426 break; 428 break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
877{ 879{
878 struct net_device *netdev = adapter->netdev; 880 struct net_device *netdev = adapter->netdev;
879 struct pci_dev *pdev = adapter->pdev; 881 struct pci_dev *pdev = adapter->pdev;
880 struct e1000_hw *hw = &adapter->hw;
881 int err = 0; 882 int err = 0;
882 883
883 if (adapter->msix_entries) { 884 if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
909 igb_setup_all_tx_resources(adapter); 910 igb_setup_all_tx_resources(adapter);
910 igb_setup_all_rx_resources(adapter); 911 igb_setup_all_rx_resources(adapter);
911 } else { 912 } else {
912 switch (hw->mac.type) { 913 igb_assign_vector(adapter->q_vector[0], 0);
913 case e1000_82575:
914 wr32(E1000_MSIXBM(0),
915 (E1000_EICR_RX_QUEUE0 |
916 E1000_EICR_TX_QUEUE0 |
917 E1000_EIMS_OTHER));
918 break;
919 case e1000_82580:
920 case e1000_82576:
921 wr32(E1000_IVAR0, E1000_IVAR_VALID);
922 break;
923 default:
924 break;
925 }
926 } 914 }
927 915
928 if (adapter->flags & IGB_FLAG_HAS_MSI) { 916 if (adapter->flags & IGB_FLAG_HAS_MSI) {
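With the per-MAC switch removed, igb_request_irq() simply calls igb_assign_vector(adapter->q_vector[0], 0) for the MSI/legacy case, and the earlier igb_assign_vector() hunk folds the "other" interrupt causes (link changes and the like) into vector 0 whenever MSI-X is not in use. A sketch of how the bitmap is assembled; the bit positions here are plausible but illustrative, not copied from the driver's header:

#include <stdio.h>

#define EICR_RX_QUEUE0 (1u << 0)
#define EICR_TX_QUEUE0 (1u << 8)
#define EIMS_OTHER     (1u << 31)

/* Build the interrupt cause bitmap for one vector: with a single shared
 * (non-MSI-X) vector, the "other" causes land on vector 0 as well. */
static unsigned make_msixbm(int rx_queue, int tx_queue,
                            int have_msix, int vector)
{
        unsigned bm = 0;
        if (rx_queue >= 0)
                bm |= EICR_RX_QUEUE0 << rx_queue;
        if (tx_queue >= 0)
                bm |= EICR_TX_QUEUE0 << tx_queue;
        if (!have_msix && vector == 0)
                bm |= EIMS_OTHER;
        return bm;
}

int main(void)
{
        printf("MSI/legacy vector 0: 0x%08x\n", make_msixbm(0, 0, 0, 0));
        printf("MSI-X vector 0:      0x%08x\n", make_msixbm(0, 0, 1, 0));
        return 0;
}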
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
1140 } 1128 }
1141 if (adapter->msix_entries) 1129 if (adapter->msix_entries)
1142 igb_configure_msix(adapter); 1130 igb_configure_msix(adapter);
1131 else
1132 igb_assign_vector(adapter->q_vector[0], 0);
1143 1133
1144 /* Clear any pending interrupts. */ 1134 /* Clear any pending interrupts. */
1145 rd32(E1000_ICR); 1135 rd32(E1000_ICR);
@@ -1306,13 +1296,8 @@ void igb_reset(struct igb_adapter *adapter)
1306 hwm = min(((pba << 10) * 9 / 10), 1296 hwm = min(((pba << 10) * 9 / 10),
1307 ((pba << 10) - 2 * adapter->max_frame_size)); 1297 ((pba << 10) - 2 * adapter->max_frame_size));
1308 1298
1309 if (mac->type < e1000_82576) { 1299 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1310 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 1300 fc->low_water = fc->high_water - 16;
1311 fc->low_water = fc->high_water - 8;
1312 } else {
1313 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1314 fc->low_water = fc->high_water - 16;
1315 }
1316 fc->pause_time = 0xFFFF; 1301 fc->pause_time = 0xFFFF;
1317 fc->send_xon = 1; 1302 fc->send_xon = 1;
1318 fc->current_mode = fc->requested_mode; 1303 fc->current_mode = fc->requested_mode;
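igb_reset() now uses the 16-byte-granularity watermarks for every MAC type instead of special-casing pre-82576 parts. The arithmetic, worked as a small example; the pba and max_frame values are made up for illustration:

#include <stdio.h>

/* Flow-control watermarks as in the simplified igb_reset() path: take 90%
 * of the packet buffer or (buffer - 2 * max frame), whichever is smaller,
 * round down to 16 bytes, and set the low mark 16 bytes below the high
 * mark.  pba is in KB, everything else in bytes. */
int main(void)
{
        unsigned pba = 34;              /* example packet buffer, KB */
        unsigned max_frame = 1522;      /* example max frame size    */

        unsigned a = (pba << 10) * 9 / 10;
        unsigned b = (pba << 10) - 2 * max_frame;
        unsigned hwm = a < b ? a : b;

        unsigned high_water = hwm & 0xFFF0;     /* 16-byte granularity */
        unsigned low_water  = high_water - 16;

        printf("hwm=%u high=%u low=%u\n", hwm, high_water, low_water);
        return 0;
}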
@@ -3427,7 +3412,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
3427 iph->daddr, 0, 3412 iph->daddr, 0,
3428 IPPROTO_TCP, 3413 IPPROTO_TCP,
3429 0); 3414 0);
3430 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3415 } else if (skb_is_gso_v6(skb)) {
3431 ipv6_hdr(skb)->payload_len = 0; 3416 ipv6_hdr(skb)->payload_len = 0;
3432 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3417 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3433 &ipv6_hdr(skb)->daddr, 3418 &ipv6_hdr(skb)->daddr,
@@ -3589,6 +3574,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3589 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 3574 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3590 struct skb_frag_struct *frag; 3575 struct skb_frag_struct *frag;
3591 3576
3577 count++;
3592 i++; 3578 i++;
3593 if (i == tx_ring->count) 3579 if (i == tx_ring->count)
3594 i = 0; 3580 i = 0;
@@ -3610,7 +3596,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3610 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 3596 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3611 goto dma_error; 3597 goto dma_error;
3612 3598
3613 count++;
3614 } 3599 }
3615 3600
3616 tx_ring->buffer_info[i].skb = skb; 3601 tx_ring->buffer_info[i].skb = skb;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index e9dd95f136aa..2aa71a766c35 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1963 iph->daddr, 0, 1963 iph->daddr, 0,
1964 IPPROTO_TCP, 1964 IPPROTO_TCP,
1965 0); 1965 0);
1966 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 1966 } else if (skb_is_gso_v6(skb)) {
1967 ipv6_hdr(skb)->payload_len = 0; 1967 ipv6_hdr(skb)->payload_len = 0;
1968 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1968 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1969 &ipv6_hdr(skb)->daddr, 1969 &ipv6_hdr(skb)->daddr,
@@ -2117,6 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2117 /* set time_stamp *before* dma to help avoid a possible race */ 2117 /* set time_stamp *before* dma to help avoid a possible race */
2118 buffer_info->time_stamp = jiffies; 2118 buffer_info->time_stamp = jiffies;
2119 buffer_info->next_to_watch = i; 2119 buffer_info->next_to_watch = i;
2120 buffer_info->mapped_as_page = false;
2120 buffer_info->dma = pci_map_single(pdev, skb->data, len, 2121 buffer_info->dma = pci_map_single(pdev, skb->data, len,
2121 PCI_DMA_TODEVICE); 2122 PCI_DMA_TODEVICE);
2122 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2123 if (pci_dma_mapping_error(pdev, buffer_info->dma))
@@ -2126,6 +2127,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2126 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 2127 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2127 struct skb_frag_struct *frag; 2128 struct skb_frag_struct *frag;
2128 2129
2130 count++;
2129 i++; 2131 i++;
2130 if (i == tx_ring->count) 2132 if (i == tx_ring->count)
2131 i = 0; 2133 i = 0;
@@ -2146,7 +2148,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2146 PCI_DMA_TODEVICE); 2148 PCI_DMA_TODEVICE);
2147 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2149 if (pci_dma_mapping_error(pdev, buffer_info->dma))
2148 goto dma_error; 2150 goto dma_error;
2149 count++;
2150 } 2151 }
2151 2152
2152 tx_ring->buffer_info[i].skb = skb; 2153 tx_ring->buffer_info[i].skb = skb;
@@ -2163,14 +2164,14 @@ dma_error:
2163 buffer_info->length = 0; 2164 buffer_info->length = 0;
2164 buffer_info->next_to_watch = 0; 2165 buffer_info->next_to_watch = 0;
2165 buffer_info->mapped_as_page = false; 2166 buffer_info->mapped_as_page = false;
2166 count--; 2167 if (count)
2168 count--;
2167 2169
2168 /* clear timestamp and dma mappings for remaining portion of packet */ 2170 /* clear timestamp and dma mappings for remaining portion of packet */
2169 while (count >= 0) { 2171 while (count--) {
2170 count--; 2172 if (i == 0)
2171 i--;
2172 if (i < 0)
2173 i += tx_ring->count; 2173 i += tx_ring->count;
2174 i--;
2174 buffer_info = &tx_ring->buffer_info[i]; 2175 buffer_info = &tx_ring->buffer_info[i];
2175 igbvf_put_txbuf(adapter, buffer_info); 2176 igbvf_put_txbuf(adapter, buffer_info);
2176 } 2177 }
@@ -2763,7 +2764,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2763 err = hw->mac.ops.reset_hw(hw); 2764 err = hw->mac.ops.reset_hw(hw);
2764 if (err) { 2765 if (err) {
2765 dev_info(&pdev->dev, 2766 dev_info(&pdev->dev,
2766 "PF still in reset state, assigning new address\n"); 2767 "PF still in reset state, assigning new address."
2768 " Is the PF interface up?\n");
2767 random_ether_addr(hw->mac.addr); 2769 random_ether_addr(hw->mac.addr);
2768 } else { 2770 } else {
2769 err = hw->mac.ops.read_mac_addr(hw); 2771 err = hw->mac.ops.read_mac_addr(hw);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index bcd0f01d5feb..593d1a4f217c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1363dma_error: 1363dma_error:
1364 dev_err(&pdev->dev, "TX DMA map failed\n"); 1364 dev_err(&pdev->dev, "TX DMA map failed\n");
1365 buffer_info->dma = 0; 1365 buffer_info->dma = 0;
1366 count--; 1366 if (count)
1367
1368 while (count >= 0) {
1369 count--; 1367 count--;
1370 i--; 1368
1371 if (i < 0) 1369 while (count--) {
1370 if (i == 0)
1372 i += tx_ring->count; 1371 i += tx_ring->count;
1372 i--;
1373 buffer_info = &tx_ring->buffer_info[i]; 1373 buffer_info = &tx_ring->buffer_info[i];
1374 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 1374 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1375 } 1375 }
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 21b41f42b61c..bfef0ebcba9a 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 10 Gigabit PCI Express Linux driver 3# Intel 10 Gigabit PCI Express Linux driver
4# Copyright(c) 1999 - 2009 Intel Corporation. 4# Copyright(c) 1999 - 2010 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8da8eb535084..303e7bd39b67 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 204177d78cec..35a06b47587b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
357 u32 fctrl_reg; 357 u32 fctrl_reg;
358 u32 rmcs_reg; 358 u32 rmcs_reg;
359 u32 reg; 359 u32 reg;
360 u32 link_speed = 0;
361 bool link_up;
360 362
361#ifdef CONFIG_DCB 363#ifdef CONFIG_DCB
362 if (hw->fc.requested_mode == ixgbe_fc_pfc) 364 if (hw->fc.requested_mode == ixgbe_fc_pfc)
363 goto out; 365 goto out;
364 366
365#endif /* CONFIG_DCB */ 367#endif /* CONFIG_DCB */
368 /*
369 * On 82598 having Rx FC on causes resets while doing 1G
370 * so if it's on turn it off once we know link_speed. For
371 * more details see 82598 Specification update.
372 */
373 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
374 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
375 switch (hw->fc.requested_mode) {
376 case ixgbe_fc_full:
377 hw->fc.requested_mode = ixgbe_fc_tx_pause;
378 break;
379 case ixgbe_fc_rx_pause:
380 hw->fc.requested_mode = ixgbe_fc_none;
381 break;
382 default:
383 /* no change */
384 break;
385 }
386 }
387
366 /* Negotiate the fc mode to use */ 388 /* Negotiate the fc mode to use */
367 ret_val = ixgbe_fc_autoneg(hw); 389 ret_val = ixgbe_fc_autoneg(hw);
368 if (ret_val) 390 if (ret_val)
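The 82598 workaround above downgrades the requested flow-control mode once the link is up at 1G, so receive pause frames are never enabled at that speed (per the 82598 specification update, Rx flow control at 1G can cause resets). A self-contained sketch of the downgrade table; the enum is local to the example rather than the driver's ixgbe_fc_* values:

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* When the link comes up at 1G, anything that would enable Rx flow
 * control is downgraded so only Tx pause (or nothing) remains. */
static enum fc_mode limit_fc_at_1g(enum fc_mode requested, int link_up,
                                   int speed_mbps)
{
        if (!(link_up && speed_mbps == 1000))
                return requested;
        switch (requested) {
        case FC_FULL:
                return FC_TX_PAUSE;
        case FC_RX_PAUSE:
                return FC_NONE;
        default:
                return requested;       /* FC_NONE / FC_TX_PAUSE unchanged */
        }
}

int main(void)
{
        printf("full @1G  -> %d\n", limit_fc_at_1g(FC_FULL, 1, 1000));
        printf("rx   @1G  -> %d\n", limit_fc_at_1g(FC_RX_PAUSE, 1, 1000));
        printf("full @10G -> %d\n", limit_fc_at_1g(FC_FULL, 1, 10000));
        return 0;
}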
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 538340527aa6..b49bd6b9feb7 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 688b8ca5da32..21f158f79dd0 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 27f3214bed2e..dfff0ffaa502 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index a1562287342f..9aea4f04bbd2 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 64a9fa15c059..5caafd4afbc3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index f30263898ebc..f0e9279d4669 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index ebbe53c352a7..cc728fa092e2 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index ec8a252636d3..4f7a26ab411e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 9e5e2827e4af..0f3f791e1e1d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 3c7a79a7d7c6..dd4883f642be 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -223,7 +223,7 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
223 223
224 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != 224 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
225 adapter->dcb_cfg.bw_percentage[0][bwg_id]) { 225 adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
226 adapter->dcb_set_bitmap |= BIT_PG_RX; 226 adapter->dcb_set_bitmap |= BIT_PG_TX;
227 adapter->dcb_set_bitmap |= BIT_RESETLINK; 227 adapter->dcb_set_bitmap |= BIT_RESETLINK;
228 } 228 }
229} 229}
@@ -341,6 +341,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
341 if (!adapter->dcb_set_bitmap) 341 if (!adapter->dcb_set_bitmap)
342 return DCB_NO_HW_CHG; 342 return DCB_NO_HW_CHG;
343 343
344 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
345 adapter->ring_feature[RING_F_DCB].indices);
346
347 if (ret)
348 return DCB_NO_HW_CHG;
349
344 /* 350 /*
345 * Only take down the adapter if the configuration change 351 * Only take down the adapter if the configuration change
346 * requires a reset. 352 * requires a reset.
@@ -359,14 +365,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
359 } 365 }
360 } 366 }
361 367
362 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
363 adapter->ring_feature[RING_F_DCB].indices);
364 if (ret) {
365 if (adapter->dcb_set_bitmap & BIT_RESETLINK)
366 clear_bit(__IXGBE_RESETTING, &adapter->state);
367 return DCB_NO_HW_CHG;
368 }
369
370 if (adapter->dcb_cfg.pfc_mode_enable) { 368 if (adapter->dcb_cfg.pfc_mode_enable) {
371 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && 369 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
372 (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) 370 (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
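Note on the ixgbe_dcbnl_set_all hunks above: the configuration copy and its validation now happen before the adapter is taken down, so a failed copy no longer has to undo the __IXGBE_RESETTING state it used to set first. A sketch of that "validate first, then tear down" ordering; the stub names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs, not the driver's functions. */
static bool copy_and_validate_config(void) { return true; }
static void take_device_down(void)  { puts("down");  }
static void apply_config(void)      { puts("apply"); }
static void bring_device_up(void)   { puts("up");    }

static int set_all(void)
{
    /* Validate before touching device state: a failure here leaves
     * the interface exactly as it was, with nothing to unwind. */
    if (!copy_and_validate_config())
        return -1;

    take_device_down();
    apply_config();
    bring_device_up();
    return 0;
}

int main(void) { return set_all(); }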
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 0bd49d3b9f65..d77961fc75f9 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index da32a108a7b4..e9a20c88c155 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index de8ff53187da..abf4b2b3f252 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index bd64387563f0..951b73cf5ca2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,7 @@ static const char ixgbe_driver_string[] =
52 52
53#define DRV_VERSION "2.0.44-k2" 53#define DRV_VERSION "2.0.44-k2"
54const char ixgbe_driver_version[] = DRV_VERSION; 54const char ixgbe_driver_version[] = DRV_VERSION;
55static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 55static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
56 56
57static const struct ixgbe_info *ixgbe_info_tbl[] = { 57static const struct ixgbe_info *ixgbe_info_tbl[] = {
58 [board_82598] = &ixgbe_82598_info, 58 [board_82598] = &ixgbe_82598_info,
@@ -262,10 +262,12 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
262 int reg_idx = tx_ring->reg_idx; 262 int reg_idx = tx_ring->reg_idx;
263 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 263 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
264 264
265 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 265 switch (adapter->hw.mac.type) {
266 case ixgbe_mac_82598EB:
266 tc = reg_idx >> 2; 267 tc = reg_idx >> 2;
267 txoff = IXGBE_TFCS_TXOFF0; 268 txoff = IXGBE_TFCS_TXOFF0;
268 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 269 break;
270 case ixgbe_mac_82599EB:
269 tc = 0; 271 tc = 0;
270 txoff = IXGBE_TFCS_TXOFF; 272 txoff = IXGBE_TFCS_TXOFF;
271 if (dcb_i == 8) { 273 if (dcb_i == 8) {
@@ -284,6 +286,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
284 tc += (reg_idx - 96) >> 4; 286 tc += (reg_idx - 96) >> 4;
285 } 287 }
286 } 288 }
289 break;
290 default:
291 tc = 0;
287 } 292 }
288 txoff <<= tc; 293 txoff <<= tc;
289 } 294 }
@@ -4373,6 +4378,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
4373 4378
4374 pci_set_power_state(pdev, PCI_D0); 4379 pci_set_power_state(pdev, PCI_D0);
4375 pci_restore_state(pdev); 4380 pci_restore_state(pdev);
4381 /*
4382 * pci_restore_state clears dev->state_saved so call
4383 * pci_save_state to restore it.
4384 */
4385 pci_save_state(pdev);
4376 4386
4377 err = pci_enable_device_mem(pdev); 4387 err = pci_enable_device_mem(pdev);
4378 if (err) { 4388 if (err) {
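Note on the resume hunk above: pci_save_state() is called right after pci_restore_state() because, as the added comment says, restoring consumes the saved copy (it clears dev->state_saved), and later suspend or error-recovery paths expect a valid saved state to still be present. A userspace mock of that flag behaviour; the mock_* names are stand-ins, not the real PCI API:

#include <stdbool.h>
#include <stdio.h>

struct fake_pci_dev { bool state_saved; };

/* Mocked semantics only: restore consumes the saved copy. */
static void mock_restore_state(struct fake_pci_dev *d) { d->state_saved = false; }
static void mock_save_state(struct fake_pci_dev *d)    { d->state_saved = true;  }

int main(void)
{
    struct fake_pci_dev dev = { .state_saved = true };

    mock_restore_state(&dev);   /* state_saved is now false */
    mock_save_state(&dev);      /* re-arm it for the next suspend/reset */

    printf("state_saved=%d\n", dev.state_saved);
    return 0;
}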
@@ -4918,7 +4928,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
4918 iph->daddr, 0, 4928 iph->daddr, 0,
4919 IPPROTO_TCP, 4929 IPPROTO_TCP,
4920 0); 4930 0);
4921 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 4931 } else if (skb_is_gso_v6(skb)) {
4922 ipv6_hdr(skb)->payload_len = 0; 4932 ipv6_hdr(skb)->payload_len = 0;
4923 tcp_hdr(skb)->check = 4933 tcp_hdr(skb)->check =
4924 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4934 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
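Note on the TSO hunk above: gso_type is a bit mask, so comparing it for equality with SKB_GSO_TCPV6 misses packets that also carry other GSO flags; skb_is_gso_v6() tests the bit instead. A small demonstration of the difference, with made-up flag names rather than the kernel's:

#include <stdio.h>

#define GSO_TCPV6  0x1
#define GSO_OTHER  0x4   /* an extra flag that can be set at the same time */

int main(void)
{
    unsigned int gso_type = GSO_TCPV6 | GSO_OTHER;

    /* Equality misses the packet because a second flag is set. */
    printf("equality test: %d\n", gso_type == GSO_TCPV6);    /* 0 */

    /* Bitwise test still matches, like skb_is_gso_v6(). */
    printf("bitwise  test: %d\n", !!(gso_type & GSO_TCPV6)); /* 1 */
    return 0;
}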
@@ -5157,19 +5167,19 @@ dma_error:
5157 tx_buffer_info->dma = 0; 5167 tx_buffer_info->dma = 0;
5158 tx_buffer_info->time_stamp = 0; 5168 tx_buffer_info->time_stamp = 0;
5159 tx_buffer_info->next_to_watch = 0; 5169 tx_buffer_info->next_to_watch = 0;
5160 count--; 5170 if (count)
5171 count--;
5161 5172
5162 /* clear timestamp and dma mappings for remaining portion of packet */ 5173 /* clear timestamp and dma mappings for remaining portion of packet */
5163 while (count >= 0) { 5174 while (count--) {
5164 count--; 5175 if (i == 0)
5165 i--;
5166 if (i < 0)
5167 i += tx_ring->count; 5176 i += tx_ring->count;
5177 i--;
5168 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5178 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5169 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5170 } 5180 }
5171 5181
5172 return count; 5182 return 0;
5173} 5183}
5174 5184
5175static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
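Note on the dma_error hunk above: the unwind now walks backwards over only the descriptors that were actually mapped, wrapping the index at the start of the ring, and returns 0 so the caller knows nothing was queued. A standalone sketch of the backward walk with wrap-around; the ring size and values are illustrative:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
    int i = 1;        /* index of the last descriptor we touched */
    int count = 4;    /* how many descriptors were actually mapped */

    /* Step back exactly `count` slots, wrapping below index 0. */
    while (count--) {
        if (i == 0)
            i += RING_SIZE;
        i--;
        printf("unmap descriptor %d\n", i);  /* prints 0, 7, 6, 5 */
    }
    return 0;
}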
@@ -5319,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5319 struct ixgbe_adapter *adapter = netdev_priv(dev); 5329 struct ixgbe_adapter *adapter = netdev_priv(dev);
5320 int txq = smp_processor_id(); 5330 int txq = smp_processor_id();
5321 5331
5322 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) 5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5333 while (unlikely(txq >= dev->real_num_tx_queues))
5334 txq -= dev->real_num_tx_queues;
5323 return txq; 5335 return txq;
5336 }
5324 5337
5325#ifdef IXGBE_FCOE 5338#ifdef IXGBE_FCOE
5326 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5339 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
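Note on the select_queue hunk above: the starting value is the CPU number, which can exceed the number of real Tx queues, so the new loop folds it back into range by repeated subtraction before it is returned. The same arithmetic in isolation, with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned int real_num_tx_queues = 16;
    unsigned int txq = 37;   /* e.g. a CPU id on a large machine */

    /* Fold the id into [0, real_num_tx_queues); same result as the
     * modulo operator, written as the loop the driver uses. */
    while (txq >= real_num_tx_queues)
        txq -= real_num_tx_queues;

    printf("txq=%u\n", txq);  /* 37 - 16 - 16 = 5 */
    return 0;
}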
@@ -5566,6 +5579,10 @@ static void ixgbe_netpoll(struct net_device *netdev)
5566 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5579 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5567 int i; 5580 int i;
5568 5581
5582 /* if interface is down do nothing */
5583 if (test_bit(__IXGBE_DOWN, &adapter->state))
5584 return;
5585
5569 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 5586 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
5570 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 5587 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5571 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 5588 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -5746,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5746 if (err) 5763 if (err)
5747 goto err_sw_init; 5764 goto err_sw_init;
5748 5765
5766 /* Make it possible for the adapter to be woken up via WOL */
5767 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
5768 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5769
5749 /* 5770 /*
5750 * If there is a fan on this device and it has failed log the 5771 * If there is a fan on this device and it has failed log the
5751 * failure. 5772 * failure.
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 9ecad17522c3..1c1efd386956 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9b700f5bf1ed..9cf5f3b4cc5d 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 84650c6ebe03..9eafddfa1b97 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c146304d8d6c..c0ceebccaa49 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -854,8 +854,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
854 854
855static irqreturn_t ks_irq(int irq, void *pw) 855static irqreturn_t ks_irq(int irq, void *pw)
856{ 856{
857 struct ks_net *ks = pw; 857 struct net_device *netdev = pw;
858 struct net_device *netdev = ks->netdev; 858 struct ks_net *ks = netdev_priv(netdev);
859 u16 status; 859 u16 status;
860 860
861 /*this should be the first in IRQ handler */ 861 /*this should be the first in IRQ handler */
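Note on the ks8851_mll hunk above: the interrupt handler now treats its cookie as the net_device (matching what the driver hands to the IRQ core) and derives the private state from it, instead of assuming the cookie is the private structure itself. A generic sketch of the "pass the outer object, derive the private data" pattern; the structures here are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_netdev {
    char name[16];
    unsigned char priv[];   /* private area allocated right behind it */
};

struct fake_priv { int irq_count; };

static void *netdev_priv_area(struct fake_netdev *nd) { return nd->priv; }

/* Handler receives the object that was handed out at registration time. */
static void irq_handler(void *cookie)
{
    struct fake_netdev *nd = cookie;
    struct fake_priv *p = netdev_priv_area(nd);

    p->irq_count++;
    printf("%s: irq_count=%d\n", nd->name, p->irq_count);
}

int main(void)
{
    struct fake_netdev *nd = calloc(1, sizeof(*nd) + sizeof(struct fake_priv));

    if (!nd)
        return 1;
    strcpy(nd->name, "eth0");
    irq_handler(nd);        /* cookie is the netdev, as registered */
    free(nd);
    return 0;
}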
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 336e7c7a9275..a8522bd73ae7 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -134,7 +134,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
134 struct sk_buff *skb; 134 struct sk_buff *skb;
135 int i; 135 int i;
136 136
137 lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL); 137 lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
138 /* allocate the tx and rx ring buffer descriptors. */ 138 /* allocate the tx and rx ring buffer descriptors. */
139 /* returns a virtual address and a physical address. */ 139 /* returns a virtual address and a physical address. */
140 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 140 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 291a505fd4fc..3cf56d90d859 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1174,7 +1174,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1174 return 0; 1174 return 0;
1175 1175
1176err_port: 1176err_port:
1177 for (port = 1; port <= dev->caps.num_ports; port++) 1177 for (--port; port >= 1; --port)
1178 mlx4_cleanup_port_info(&priv->port[port]); 1178 mlx4_cleanup_port_info(&priv->port[port]);
1179 1179
1180 mlx4_cleanup_mcg_table(dev); 1180 mlx4_cleanup_mcg_table(dev);
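Note on the mlx4 hunk above: the error path used to re-walk every port on failure; the fix unwinds only the ports that were actually initialised, in reverse order, starting from the one before the port that failed. A standalone model of the corrected unwind, with illustrative names:

#include <stdio.h>

#define NUM_PORTS 4

static int init_port(int port) { return port == 3 ? -1 : 0; }  /* port 3 fails */
static void cleanup_port(int port) { printf("cleanup port %d\n", port); }

int main(void)
{
    int port;

    for (port = 1; port <= NUM_PORTS; port++) {
        if (init_port(port))
            goto err_port;
    }
    return 0;

err_port:
    /* Only ports 1..port-1 were set up, so unwind just those. */
    for (--port; port >= 1; --port)
        cleanup_port(port);
    return 1;
}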
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1405a170bb43..af67af55efe7 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -656,6 +656,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
656 struct sk_buff *skb; 656 struct sk_buff *skb;
657 int rx; 657 int rx;
658 struct rx_desc *rx_desc; 658 struct rx_desc *rx_desc;
659 int size;
659 660
660 skb = __skb_dequeue(&mp->rx_recycle); 661 skb = __skb_dequeue(&mp->rx_recycle);
661 if (skb == NULL) 662 if (skb == NULL)
@@ -678,10 +679,11 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
678 679
679 rx_desc = rxq->rx_desc_area + rx; 680 rx_desc = rxq->rx_desc_area + rx;
680 681
682 size = skb->end - skb->data;
681 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, 683 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
682 skb->data, mp->skb_size, 684 skb->data, size,
683 DMA_FROM_DEVICE); 685 DMA_FROM_DEVICE);
684 rx_desc->buf_size = mp->skb_size; 686 rx_desc->buf_size = size;
685 rxq->rx_skb[rx] = skb; 687 rxq->rx_skb[rx] = skb;
686 wmb(); 688 wmb();
687 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; 689 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 76cd1f3e9fc8..9bc5bd1d538a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 65 56#define _NETXEN_NIC_LINUX_SUBVERSION 72
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.65" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.72"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index ddd704ae0188..542f408333ff 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -66,7 +66,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
66 66
67#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) 67#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
68 68
69#define NETXEN_NIC_REGS_COUNT 42 69#define NETXEN_NIC_REGS_COUNT 30
70#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) 70#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
71#define NETXEN_MAX_EEPROM_LEN 1024 71#define NETXEN_MAX_EEPROM_LEN 1024
72 72
@@ -312,150 +312,91 @@ static int netxen_nic_get_regs_len(struct net_device *dev)
312 return NETXEN_NIC_REGS_LEN; 312 return NETXEN_NIC_REGS_LEN;
313} 313}
314 314
315struct netxen_niu_regs {
316 __u32 reg[NETXEN_NIC_REGS_COUNT];
317};
318
319static struct netxen_niu_regs niu_registers[] = {
320 {
321 /* GB Mode */
322 {
323 NETXEN_NIU_GB_SERDES_RESET,
324 NETXEN_NIU_GB0_MII_MODE,
325 NETXEN_NIU_GB1_MII_MODE,
326 NETXEN_NIU_GB2_MII_MODE,
327 NETXEN_NIU_GB3_MII_MODE,
328 NETXEN_NIU_GB0_GMII_MODE,
329 NETXEN_NIU_GB1_GMII_MODE,
330 NETXEN_NIU_GB2_GMII_MODE,
331 NETXEN_NIU_GB3_GMII_MODE,
332 NETXEN_NIU_REMOTE_LOOPBACK,
333 NETXEN_NIU_GB0_HALF_DUPLEX,
334 NETXEN_NIU_GB1_HALF_DUPLEX,
335 NETXEN_NIU_RESET_SYS_FIFOS,
336 NETXEN_NIU_GB_CRC_DROP,
337 NETXEN_NIU_GB_DROP_WRONGADDR,
338 NETXEN_NIU_TEST_MUX_CTL,
339
340 NETXEN_NIU_GB_MAC_CONFIG_0(0),
341 NETXEN_NIU_GB_MAC_CONFIG_1(0),
342 NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0),
343 NETXEN_NIU_GB_MAX_FRAME_SIZE(0),
344 NETXEN_NIU_GB_TEST_REG(0),
345 NETXEN_NIU_GB_MII_MGMT_CONFIG(0),
346 NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
347 NETXEN_NIU_GB_MII_MGMT_ADDR(0),
348 NETXEN_NIU_GB_MII_MGMT_CTRL(0),
349 NETXEN_NIU_GB_MII_MGMT_STATUS(0),
350 NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
351 NETXEN_NIU_GB_INTERFACE_CTRL(0),
352 NETXEN_NIU_GB_INTERFACE_STATUS(0),
353 NETXEN_NIU_GB_STATION_ADDR_0(0),
354 NETXEN_NIU_GB_STATION_ADDR_1(0),
355 -1,
356 }
357 },
358 {
359 /* XG Mode */
360 {
361 NETXEN_NIU_XG_SINGLE_TERM,
362 NETXEN_NIU_XG_DRIVE_HI,
363 NETXEN_NIU_XG_DRIVE_LO,
364 NETXEN_NIU_XG_DTX,
365 NETXEN_NIU_XG_DEQ,
366 NETXEN_NIU_XG_WORD_ALIGN,
367 NETXEN_NIU_XG_RESET,
368 NETXEN_NIU_XG_POWER_DOWN,
369 NETXEN_NIU_XG_RESET_PLL,
370 NETXEN_NIU_XG_SERDES_LOOPBACK,
371 NETXEN_NIU_XG_DO_BYTE_ALIGN,
372 NETXEN_NIU_XG_TX_ENABLE,
373 NETXEN_NIU_XG_RX_ENABLE,
374 NETXEN_NIU_XG_STATUS,
375 NETXEN_NIU_XG_PAUSE_THRESHOLD,
376 NETXEN_NIU_XGE_CONFIG_0,
377 NETXEN_NIU_XGE_CONFIG_1,
378 NETXEN_NIU_XGE_IPG,
379 NETXEN_NIU_XGE_STATION_ADDR_0_HI,
380 NETXEN_NIU_XGE_STATION_ADDR_0_1,
381 NETXEN_NIU_XGE_STATION_ADDR_1_LO,
382 NETXEN_NIU_XGE_STATUS,
383 NETXEN_NIU_XGE_MAX_FRAME_SIZE,
384 NETXEN_NIU_XGE_PAUSE_FRAME_VALUE,
385 NETXEN_NIU_XGE_TX_BYTE_CNT,
386 NETXEN_NIU_XGE_TX_FRAME_CNT,
387 NETXEN_NIU_XGE_RX_BYTE_CNT,
388 NETXEN_NIU_XGE_RX_FRAME_CNT,
389 NETXEN_NIU_XGE_AGGR_ERROR_CNT,
390 NETXEN_NIU_XGE_MULTICAST_FRAME_CNT,
391 NETXEN_NIU_XGE_UNICAST_FRAME_CNT,
392 NETXEN_NIU_XGE_CRC_ERROR_CNT,
393 NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
394 NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
395 NETXEN_NIU_XGE_LOCAL_ERROR_CNT,
396 NETXEN_NIU_XGE_REMOTE_ERROR_CNT,
397 NETXEN_NIU_XGE_CONTROL_CHAR_CNT,
398 NETXEN_NIU_XGE_PAUSE_FRAME_CNT,
399 -1,
400 }
401 }
402};
403
404static void 315static void
405netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) 316netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
406{ 317{
407 struct netxen_adapter *adapter = netdev_priv(dev); 318 struct netxen_adapter *adapter = netdev_priv(dev);
408 __u32 mode, *regs_buff = p; 319 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
409 int i, window; 320 struct nx_host_sds_ring *sds_ring;
321 u32 *regs_buff = p;
322 int ring, i = 0;
323 int port = adapter->physical_port;
410 324
411 memset(p, 0, NETXEN_NIC_REGS_LEN); 325 memset(p, 0, NETXEN_NIC_REGS_LEN);
326
412 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | 327 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
413 (adapter->pdev)->device; 328 (adapter->pdev)->device;
414 /* which mode */
415 regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE);
416 mode = regs_buff[0];
417
418 /* Common registers to all the modes */
419 regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER);
420 /* GB/XGB Mode */
421 mode = (mode / 2) - 1;
422 window = 0;
423 if (mode <= 1) {
424 for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) {
425 /* GB: port specific registers */
426 if (mode == 0 && i >= 19)
427 window = adapter->physical_port *
428 NETXEN_NIC_PORT_WINDOW;
429
430 regs_buff[i] = NXRD32(adapter,
431 niu_registers[mode].reg[i - 3] + window);
432 }
433 329
330 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
331 return;
332
333 regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
334 regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
335 regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
336 regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
337 regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
338 regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
339 regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
340 regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
341 regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
342
343 regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
344 regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
345 regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
346 regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
347
348 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
349
350 regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
351 i += 2;
352
353 regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
354 regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
355
356 } else {
357 i++;
358
359 regs_buff[i++] = NXRD32(adapter,
360 NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
361 regs_buff[i++] = NXRD32(adapter,
362 NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
363
364 regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
365 regs_buff[i++] = NXRDIO(adapter,
366 adapter->tx_ring->crb_cmd_consumer);
367 }
368
369 regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
370
371 regs_buff[i++] = NXRDIO(adapter,
372 recv_ctx->rds_rings[0].crb_rcv_producer);
373 regs_buff[i++] = NXRDIO(adapter,
374 recv_ctx->rds_rings[1].crb_rcv_producer);
375
376 regs_buff[i++] = adapter->max_sds_rings;
377
378 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
379 sds_ring = &(recv_ctx->sds_rings[ring]);
380 regs_buff[i++] = NXRDIO(adapter,
381 sds_ring->crb_sts_consumer);
434 } 382 }
435} 383}
436 384
437static u32 netxen_nic_test_link(struct net_device *dev) 385static u32 netxen_nic_test_link(struct net_device *dev)
438{ 386{
439 struct netxen_adapter *adapter = netdev_priv(dev); 387 struct netxen_adapter *adapter = netdev_priv(dev);
440 __u32 status; 388 u32 val, port;
441 int val;
442 389
443 /* read which mode */ 390 port = adapter->physical_port;
444 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 391 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
445 if (adapter->phy_read && 392 val = NXRD32(adapter, CRB_XG_STATE_P3);
446 adapter->phy_read(adapter, 393 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
447 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 394 return (val == XG_LINK_UP_P3) ? 0 : 1;
448 &status) != 0) 395 } else {
449 return -EIO;
450 else {
451 val = netxen_get_phy_link(status);
452 return !val;
453 }
454 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
455 val = NXRD32(adapter, CRB_XG_STATE); 396 val = NXRD32(adapter, CRB_XG_STATE);
397 val = (val >> port*8) & 0xff;
456 return (val == XG_LINK_UP) ? 0 : 1; 398 return (val == XG_LINK_UP) ? 0 : 1;
457 } 399 }
458 return -EIO;
459} 400}
460 401
461static int 402static int
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 2e364fee3cbb..85e28e60ecf1 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -345,8 +345,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
345void 345void
346netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) 346netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
347{ 347{
348 int val; 348 NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
349 val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
350} 349}
351 350
352int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) 351int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
@@ -691,6 +690,9 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
691 struct list_head *head; 690 struct list_head *head;
692 nx_mac_list_t *cur; 691 nx_mac_list_t *cur;
693 692
693 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
694 return;
695
694 list_splice_tail_init(&adapter->mac_list, &del_list); 696 list_splice_tail_init(&adapter->mac_list, &del_list);
695 697
696 nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); 698 nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02f8d4b4db63..64cff68d372c 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -184,6 +184,8 @@ skip_rds:
184 184
185 tx_ring = adapter->tx_ring; 185 tx_ring = adapter->tx_ring;
186 vfree(tx_ring->cmd_buf_arr); 186 vfree(tx_ring->cmd_buf_arr);
187 kfree(tx_ring);
188 adapter->tx_ring = NULL;
187} 189}
188 190
189int netxen_alloc_sw_resources(struct netxen_adapter *adapter) 191int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
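Note on the netxen hunk above: the software-resources teardown now frees the tx_ring structure itself after its command buffer array and clears adapter->tx_ring, so later teardown or re-allocation paths see NULL instead of a dangling pointer. The idiom in isolation, with illustrative names:

#include <stdlib.h>

struct adapter { int *tx_ring; };

static void free_tx_ring(struct adapter *a)
{
    free(a->tx_ring);
    a->tx_ring = NULL;   /* callers can test for NULL; a stale pointer is avoided */
}

int main(void)
{
    struct adapter a = { .tx_ring = malloc(64 * sizeof(int)) };

    free_tx_ring(&a);
    free_tx_ring(&a);    /* safe: free(NULL) is a no-op and the pointer stays NULL */
    return 0;
}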
@@ -782,7 +784,7 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
782 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) 784 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
783 return 1; 785 return 1;
784 786
785 old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); 787 old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
786 788
787 for (i = 0; i < 10; i++) { 789 for (i = 0; i < 10; i++) {
788 790
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 6cae26a5bd67..24279e6e55f5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -340,7 +340,7 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
340 if (!(first_boot & 0x4)) { 340 if (!(first_boot & 0x4)) {
341 first_boot |= 0x4; 341 first_boot |= 0x4;
342 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); 342 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
343 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); 343 NXRD32(adapter, NETXEN_PCIE_REG(0x4));
344 } 344 }
345 345
346 /* This is the first boot after power up */ 346 /* This is the first boot after power up */
@@ -1898,12 +1898,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1898 linkup = (val == XG_LINK_UP_P3); 1898 linkup = (val == XG_LINK_UP_P3);
1899 } else { 1899 } else {
1900 val = NXRD32(adapter, CRB_XG_STATE); 1900 val = NXRD32(adapter, CRB_XG_STATE);
1901 if (adapter->ahw.port_type == NETXEN_NIC_GBE) 1901 val = (val >> port*8) & 0xff;
1902 linkup = (val >> port) & 1; 1902 linkup = (val == XG_LINK_UP);
1903 else {
1904 val = (val >> port*8) & 0xff;
1905 linkup = (val == XG_LINK_UP);
1906 }
1907 } 1903 }
1908 1904
1909 netxen_advert_link_change(adapter, linkup); 1905 netxen_advert_link_change(adapter, linkup);
@@ -1945,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1945 netif_wake_queue(adapter->netdev); 1941 netif_wake_queue(adapter->netdev);
1946 1942
1947 clear_bit(__NX_RESETTING, &adapter->state); 1943 clear_bit(__NX_RESETTING, &adapter->state);
1948 1944 return;
1949 } else { 1945 } else {
1950 clear_bit(__NX_RESETTING, &adapter->state); 1946 clear_bit(__NX_RESETTING, &adapter->state);
1951 if (!netxen_nic_reset_context(adapter)) { 1947 if (!netxen_nic_reset_context(adapter)) {
@@ -2244,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
2244 2240
2245 netxen_nic_down(adapter, netdev); 2241 netxen_nic_down(adapter, netdev);
2246 2242
2243 rtnl_lock();
2247 netxen_nic_detach(adapter); 2244 netxen_nic_detach(adapter);
2245 rtnl_unlock();
2248 2246
2249 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2247 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
2250 2248
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8ce58c4c7dd3..2aed2b382c40 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -2844,7 +2844,7 @@ static int tcam_wait_bit(struct niu *np, u64 bit)
2844 break; 2844 break;
2845 udelay(1); 2845 udelay(1);
2846 } 2846 }
2847 if (limit < 0) 2847 if (limit <= 0)
2848 return -ENODEV; 2848 return -ENODEV;
2849 2849
2850 return 0; 2850 return 0;
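Note on the niu hunk above: in a countdown poll loop whose condition pre-decrements the limit, the counter stops at 0 on timeout and never goes below it, so the old "limit < 0" check could never report the timeout; "limit <= 0" does. A reduced version of the off-by-one; the loop header is not part of the hunk, so its exact shape here is an assumption, and the hardware read is simulated to never succeed:

#include <stdio.h>

int main(void)
{
    int limit = 1000;
    int bit_set = 0;     /* simulate a status bit that never comes up */

    while (--limit > 0) {
        if (bit_set)
            break;
        /* udelay(1) would go here */
    }

    printf("limit=%d\n", limit);                             /* 0 on timeout */
    printf("limit < 0  reports timeout: %d\n", limit < 0);   /* 0: the bug  */
    printf("limit <= 0 reports timeout: %d\n", limit <= 0);  /* 1: the fix  */
    return 0;
}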
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 813aca3fc433..7b17404d0858 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
717 PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), 717 PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
718 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), 718 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
719 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 719 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
720 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
720 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 721 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
721 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 722 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
722 PCMCIA_DEVICE_NULL, 723 PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 8a5ae3b182ed..12e3233868e9 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1402,7 +1402,6 @@ static void BuildLAF(int *ladrf, int *adr)
1402 for (i = 0; i < 8; i++) 1402 for (i = 0; i < 8; i++)
1403 printk(KERN_CONT " %02X", ladrf[i]); 1403 printk(KERN_CONT " %02X", ladrf[i]);
1404 printk(KERN_CONT "\n"); 1404 printk(KERN_CONT "\n");
1405 }
1406#endif 1405#endif
1407} /* BuildLAF */ 1406} /* BuildLAF */
1408 1407
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 92ed3fbf89a5..776cad2f5715 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1741,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1741 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), 1741 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
1742 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), 1742 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
1743 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1743 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
1744 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1744 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
1745 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), 1745 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1746 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), 1746 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
1747 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), 1747 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
@@ -1754,7 +1754,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
1754MODULE_FIRMWARE("cis/PCMLM28.cis"); 1754MODULE_FIRMWARE("cis/PCMLM28.cis");
1755MODULE_FIRMWARE("cis/DP83903.cis"); 1755MODULE_FIRMWARE("cis/DP83903.cis");
1756MODULE_FIRMWARE("cis/LA-PCM.cis"); 1756MODULE_FIRMWARE("cis/LA-PCM.cis");
1757MODULE_FIRMWARE("PE520.cis"); 1757MODULE_FIRMWARE("cis/PE520.cis");
1758MODULE_FIRMWARE("cis/NE2K.cis"); 1758MODULE_FIRMWARE("cis/NE2K.cis");
1759MODULE_FIRMWARE("cis/PE-200.cis"); 1759MODULE_FIRMWARE("cis/PE-200.cis");
1760MODULE_FIRMWARE("cis/tamarack.cis"); 1760MODULE_FIRMWARE("cis/tamarack.cis");
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index dcc67a35e8f2..e154677ff706 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -45,6 +45,7 @@ static const char *const version =
45#include <linux/crc32.h> 45#include <linux/crc32.h>
46#include <linux/netdevice.h> 46#include <linux/netdevice.h>
47#include <linux/etherdevice.h> 47#include <linux/etherdevice.h>
48#include <linux/if_ether.h>
48#include <linux/skbuff.h> 49#include <linux/skbuff.h>
49#include <linux/spinlock.h> 50#include <linux/spinlock.h>
50#include <linux/moduleparam.h> 51#include <linux/moduleparam.h>
@@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1765 1766
1766 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ 1767 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1767 if (!is_valid_ether_addr(dev->perm_addr)) 1768 if (!is_valid_ether_addr(dev->perm_addr))
1768 memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1769 memset(dev->dev_addr, 0, ETH_ALEN);
1769 1770
1770 if (pcnet32_debug & NETIF_MSG_PROBE) { 1771 if (pcnet32_debug & NETIF_MSG_PROBE) {
1771 printk(" %pM", dev->dev_addr); 1772 printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index c13cf64095b6..33c4b12a63ba 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -331,8 +331,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
331 bool clk125en = true; 331 bool clk125en = true;
332 332
333 /* Abort if we are using an untested phy. */ 333 /* Abort if we are using an untested phy. */
334 if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 || 334 if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
335 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 || 335 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
336 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M) 336 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
337 return; 337 return;
338 338
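Note on the broadcom PHY hunk above: the check is meant to bail out unless the PHY is one of three tested models, but "model != A || model != B || model != C" is true for every possible model (no value can equal all three), so the function always returned early. With && the guard fires only when the model matches none of the three. A minimal reproduction, with made-up IDs:

#include <stdio.h>

#define ID_A 1
#define ID_B 2
#define ID_C 3

int main(void)
{
    int model = ID_B;   /* a supported model */

    /* Buggy form: always true, so even supported models are rejected. */
    int reject_or  = (model != ID_A || model != ID_B || model != ID_C);

    /* Fixed form: true only when the model matches none of the three. */
    int reject_and = (model != ID_A && model != ID_B && model != ID_C);

    printf("||: reject=%d   &&: reject=%d\n", reject_or, reject_and);  /* 1, 0 */
    return 0;
}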
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index bd4e8d72dc08..e17b70291bbc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -264,6 +264,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
264 (phydev->phy_id & phydrv->phy_id_mask)); 264 (phydev->phy_id & phydrv->phy_id_mask));
265} 265}
266 266
267#ifdef CONFIG_PM
268
267static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) 269static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
268{ 270{
269 struct device_driver *drv = phydev->dev.driver; 271 struct device_driver *drv = phydev->dev.driver;
@@ -295,34 +297,88 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
295 return true; 297 return true;
296} 298}
297 299
298/* Suspend and resume. Copied from platform_suspend and 300static int mdio_bus_suspend(struct device *dev)
299 * platform_resume
300 */
301static int mdio_bus_suspend(struct device * dev, pm_message_t state)
302{ 301{
303 struct phy_driver *phydrv = to_phy_driver(dev->driver); 302 struct phy_driver *phydrv = to_phy_driver(dev->driver);
304 struct phy_device *phydev = to_phy_device(dev); 303 struct phy_device *phydev = to_phy_device(dev);
305 304
305 /*
306 * We must stop the state machine manually, otherwise it stops out of
307 * control, possibly with the phydev->lock held. Upon resume, netdev
308 * may call phy routines that try to grab the same lock, and that may
309 * lead to a deadlock.
310 */
311 if (phydev->attached_dev)
312 phy_stop_machine(phydev);
313
306 if (!mdio_bus_phy_may_suspend(phydev)) 314 if (!mdio_bus_phy_may_suspend(phydev))
307 return 0; 315 return 0;
316
308 return phydrv->suspend(phydev); 317 return phydrv->suspend(phydev);
309} 318}
310 319
311static int mdio_bus_resume(struct device * dev) 320static int mdio_bus_resume(struct device *dev)
312{ 321{
313 struct phy_driver *phydrv = to_phy_driver(dev->driver); 322 struct phy_driver *phydrv = to_phy_driver(dev->driver);
314 struct phy_device *phydev = to_phy_device(dev); 323 struct phy_device *phydev = to_phy_device(dev);
324 int ret;
315 325
316 if (!mdio_bus_phy_may_suspend(phydev)) 326 if (!mdio_bus_phy_may_suspend(phydev))
327 goto no_resume;
328
329 ret = phydrv->resume(phydev);
330 if (ret < 0)
331 return ret;
332
333no_resume:
334 if (phydev->attached_dev)
335 phy_start_machine(phydev, NULL);
336
337 return 0;
338}
339
340static int mdio_bus_restore(struct device *dev)
341{
342 struct phy_device *phydev = to_phy_device(dev);
343 struct net_device *netdev = phydev->attached_dev;
344 int ret;
345
346 if (!netdev)
317 return 0; 347 return 0;
318 return phydrv->resume(phydev); 348
349 ret = phy_init_hw(phydev);
350 if (ret < 0)
351 return ret;
352
353 /* The PHY needs to renegotiate. */
354 phydev->link = 0;
355 phydev->state = PHY_UP;
356
357 phy_start_machine(phydev, NULL);
358
359 return 0;
319} 360}
320 361
362static struct dev_pm_ops mdio_bus_pm_ops = {
363 .suspend = mdio_bus_suspend,
364 .resume = mdio_bus_resume,
365 .freeze = mdio_bus_suspend,
366 .thaw = mdio_bus_resume,
367 .restore = mdio_bus_restore,
368};
369
370#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops)
371
372#else
373
374#define MDIO_BUS_PM_OPS NULL
375
376#endif /* CONFIG_PM */
377
321struct bus_type mdio_bus_type = { 378struct bus_type mdio_bus_type = {
322 .name = "mdio_bus", 379 .name = "mdio_bus",
323 .match = mdio_bus_match, 380 .match = mdio_bus_match,
324 .suspend = mdio_bus_suspend, 381 .pm = MDIO_BUS_PM_OPS,
325 .resume = mdio_bus_resume,
326}; 382};
327EXPORT_SYMBOL(mdio_bus_type); 383EXPORT_SYMBOL(mdio_bus_type);
328 384
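Note on the mdio_bus hunks above: the bus moves from the legacy suspend/resume hooks to a dev_pm_ops table, wrapped in CONFIG_PM with a MDIO_BUS_PM_OPS macro that collapses to NULL when power management is compiled out, so the bus_type initialiser stays valid either way. The conditional-ops pattern in miniature; the structures and names here are illustrative, not the kernel's:

#include <stdio.h>

struct pm_ops { int (*suspend)(void); int (*resume)(void); };
struct bus_type { const char *name; const struct pm_ops *pm; };

#define HAVE_PM 1          /* stand-in for CONFIG_PM */

#if HAVE_PM
static int bus_suspend(void) { puts("suspend"); return 0; }
static int bus_resume(void)  { puts("resume");  return 0; }

static const struct pm_ops bus_pm_ops = {
    .suspend = bus_suspend,
    .resume  = bus_resume,
};
#define BUS_PM_OPS (&bus_pm_ops)
#else
#define BUS_PM_OPS NULL     /* core code must tolerate a NULL pm pointer */
#endif

static const struct bus_type mdio_like_bus = {
    .name = "mdio_bus",
    .pm   = BUS_PM_OPS,
};

int main(void)
{
    if (mdio_like_bus.pm && mdio_like_bus.pm->suspend)
        mdio_like_bus.pm->suspend();
    return 0;
}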
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b0e9f9c51721..0295097d6c44 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg);
410 410
411 411
412static void phy_change(struct work_struct *work); 412static void phy_change(struct work_struct *work);
413static void phy_state_machine(struct work_struct *work);
414 413
415/** 414/**
416 * phy_start_machine - start PHY state machine tracking 415 * phy_start_machine - start PHY state machine tracking
@@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev,
430{ 429{
431 phydev->adjust_state = handler; 430 phydev->adjust_state = handler;
432 431
433 INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
434 schedule_delayed_work(&phydev->state_queue, HZ); 432 schedule_delayed_work(&phydev->state_queue, HZ);
435} 433}
436 434
@@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start);
761 * phy_state_machine - Handle the state machine 759 * phy_state_machine - Handle the state machine
762 * @work: work_struct that describes the work to be done 760 * @work: work_struct that describes the work to be done
763 */ 761 */
764static void phy_state_machine(struct work_struct *work) 762void phy_state_machine(struct work_struct *work)
765{ 763{
766 struct delayed_work *dwork = to_delayed_work(work); 764 struct delayed_work *dwork = to_delayed_work(work);
767 struct phy_device *phydev = 765 struct phy_device *phydev =
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b10fedd82143..adbc0fded130 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
177 dev->state = PHY_DOWN; 177 dev->state = PHY_DOWN;
178 178
179 mutex_init(&dev->lock); 179 mutex_init(&dev->lock);
180 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
180 181
181 return dev; 182 return dev;
182} 183}
@@ -378,6 +379,20 @@ void phy_disconnect(struct phy_device *phydev)
378} 379}
379EXPORT_SYMBOL(phy_disconnect); 380EXPORT_SYMBOL(phy_disconnect);
380 381
382int phy_init_hw(struct phy_device *phydev)
383{
384 int ret;
385
386 if (!phydev->drv || !phydev->drv->config_init)
387 return 0;
388
389 ret = phy_scan_fixups(phydev);
390 if (ret < 0)
391 return ret;
392
393 return phydev->drv->config_init(phydev);
394}
395
381/** 396/**
382 * phy_attach_direct - attach a network device to a given PHY device pointer 397 * phy_attach_direct - attach a network device to a given PHY device pointer
383 * @dev: network device to attach 398 * @dev: network device to attach
@@ -425,21 +440,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
425 /* Do initial configuration here, now that 440 /* Do initial configuration here, now that
426 * we have certain key parameters 441 * we have certain key parameters
427 * (dev_flags and interface) */ 442 * (dev_flags and interface) */
428 if (phydev->drv->config_init) { 443 return phy_init_hw(phydev);
429 int err;
430
431 err = phy_scan_fixups(phydev);
432
433 if (err < 0)
434 return err;
435
436 err = phydev->drv->config_init(phydev);
437
438 if (err < 0)
439 return err;
440 }
441
442 return 0;
443} 444}
444EXPORT_SYMBOL(phy_attach_direct); 445EXPORT_SYMBOL(phy_attach_direct);
445 446
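Note on the phy_device hunks above: the fixup-and-config_init sequence from phy_attach_direct is factored into phy_init_hw() so the new restore path can reuse it; the helper returns 0 when the driver has no config_init hook, preserving the old behaviour. A sketch of that extraction; the types and hooks here are illustrative, not the kernel's:

#include <stdio.h>

struct drv { int (*config_init)(void); };
struct dev { struct drv *drv; };

static int scan_fixups(void) { return 0; }   /* stand-in for phy_scan_fixups() */

/* Shared init helper: a no-op success when there is nothing to configure. */
static int init_hw(struct dev *d)
{
    int ret;

    if (!d->drv || !d->drv->config_init)
        return 0;

    ret = scan_fixups();
    if (ret < 0)
        return ret;

    return d->drv->config_init();
}

static int my_config_init(void) { puts("config_init"); return 0; }

int main(void)
{
    struct drv driver = { .config_init = my_config_init };
    struct dev with = { .drv = &driver }, without = { .drv = NULL };

    printf("with=%d without=%d\n", init_hw(&with), init_hw(&without));
    return 0;
}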
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 707b391afa02..894a7c84faef 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -4119,7 +4119,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4119 err = pcie_set_readrq(pdev, 4096); 4119 err = pcie_set_readrq(pdev, 4096);
4120 if (err) { 4120 if (err) {
4121 dev_err(&pdev->dev, "Set readrq failed.\n"); 4121 dev_err(&pdev->dev, "Set readrq failed.\n");
4122 goto err_out; 4122 goto err_out1;
4123 } 4123 }
4124 4124
4125 err = pci_request_regions(pdev, DRV_NAME); 4125 err = pci_request_regions(pdev, DRV_NAME);
@@ -4140,7 +4140,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4140 4140
4141 if (err) { 4141 if (err) {
4142 dev_err(&pdev->dev, "No usable DMA configuration.\n"); 4142 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4143 goto err_out; 4143 goto err_out2;
4144 } 4144 }
4145 4145
4146 /* Set PCIe reset type for EEH to fundamental. */ 4146 /* Set PCIe reset type for EEH to fundamental. */
@@ -4152,7 +4152,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4152 if (!qdev->reg_base) { 4152 if (!qdev->reg_base) {
4153 dev_err(&pdev->dev, "Register mapping failed.\n"); 4153 dev_err(&pdev->dev, "Register mapping failed.\n");
4154 err = -ENOMEM; 4154 err = -ENOMEM;
4155 goto err_out; 4155 goto err_out2;
4156 } 4156 }
4157 4157
4158 qdev->doorbell_area_size = pci_resource_len(pdev, 3); 4158 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@@ -4162,14 +4162,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4162 if (!qdev->doorbell_area) { 4162 if (!qdev->doorbell_area) {
4163 dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); 4163 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4164 err = -ENOMEM; 4164 err = -ENOMEM;
4165 goto err_out; 4165 goto err_out2;
4166 } 4166 }
4167 4167
4168 err = ql_get_board_info(qdev); 4168 err = ql_get_board_info(qdev);
4169 if (err) { 4169 if (err) {
4170 dev_err(&pdev->dev, "Register access failed.\n"); 4170 dev_err(&pdev->dev, "Register access failed.\n");
4171 err = -EIO; 4171 err = -EIO;
4172 goto err_out; 4172 goto err_out2;
4173 } 4173 }
4174 qdev->msg_enable = netif_msg_init(debug, default_msg); 4174 qdev->msg_enable = netif_msg_init(debug, default_msg);
4175 spin_lock_init(&qdev->hw_lock); 4175 spin_lock_init(&qdev->hw_lock);
@@ -4179,7 +4179,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4179 err = qdev->nic_ops->get_flash(qdev); 4179 err = qdev->nic_ops->get_flash(qdev);
4180 if (err) { 4180 if (err) {
4181 dev_err(&pdev->dev, "Invalid FLASH.\n"); 4181 dev_err(&pdev->dev, "Invalid FLASH.\n");
4182 goto err_out; 4182 goto err_out2;
4183 } 4183 }
4184 4184
4185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 4185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
@@ -4212,8 +4212,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4212 DRV_NAME, DRV_VERSION); 4212 DRV_NAME, DRV_VERSION);
4213 } 4213 }
4214 return 0; 4214 return 0;
4215err_out: 4215err_out2:
4216 ql_release_all(pdev); 4216 ql_release_all(pdev);
4217err_out1:
4217 pci_disable_device(pdev); 4218 pci_disable_device(pdev);
4218 return err; 4219 return err;
4219} 4220}
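Note on the qlge hunks above: failures after pci_enable_device() but before the regions are requested used to jump to a label that called ql_release_all() on resources that were never acquired; the renamed err_out1/err_out2 labels unwind only what has been set up at each stage. The staged-label pattern on its own, with simulated resources and illustrative names:

#include <stdio.h>

static int step_a(void) { puts("A acquired"); return 0; }
static int step_b(void) { puts("B acquired"); return 0; }
static int step_c(void) { puts("C failed");   return -1; }  /* simulate failure */
static void undo_a(void) { puts("A released"); }
static void undo_b(void) { puts("B released"); }

static int probe(void)
{
    int err;

    err = step_a();
    if (err)
        goto err_out0;          /* nothing acquired yet */

    err = step_b();
    if (err)
        goto err_out1;          /* only A to undo */

    err = step_c();
    if (err)
        goto err_out2;          /* A and B to undo */

    return 0;

err_out2:
    undo_b();
err_out1:
    undo_a();
err_out0:
    return err;
}

int main(void) { return probe() ? 1 : 0; }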
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 20a71749154a..1c257098d0a6 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1293,7 +1293,7 @@ static void rr_dump(struct net_device *dev)
1293 1293
1294 printk("Error code 0x%x\n", readl(&regs->Fail1)); 1294 printk("Error code 0x%x\n", readl(&regs->Fail1));
1295 1295
1296 index = (((readl(&regs->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES; 1296 index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
1297 cons = rrpriv->dirty_tx; 1297 cons = rrpriv->dirty_tx;
1298 printk("TX ring index %i, TX consumer %i\n", 1298 printk("TX ring index %i, TX consumer %i\n",
1299 index, cons); 1299 index, cons);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cc4218667cba..3c4836d0898f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3421 break; 3421 break;
3422 } 3422 }
3423 } else { 3423 } else {
3424 if (!(val64 & busy_bit)) { 3424 if (val64 & busy_bit) {
3425 ret = SUCCESS; 3425 ret = SUCCESS;
3426 break; 3426 break;
3427 } 3427 }
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f983e3b507cc..46997e177ee3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx)
741 741
742 EFX_LOG(efx, "create port\n"); 742 EFX_LOG(efx, "create port\n");
743 743
744 if (phy_flash_cfg)
745 efx->phy_mode = PHY_MODE_SPECIAL;
746
744 /* Connect up MAC/PHY operations table */ 747 /* Connect up MAC/PHY operations table */
745 rc = efx->type->probe_port(efx); 748 rc = efx->type->probe_port(efx);
746 if (rc) 749 if (rc)
747 goto err; 750 goto err;
748 751
749 if (phy_flash_cfg)
750 efx->phy_mode = PHY_MODE_SPECIAL;
751
752 /* Sanity check MAC address */ 752 /* Sanity check MAC address */
753 if (is_valid_ether_addr(efx->mac_address)) { 753 if (is_valid_ether_addr(efx->mac_address)) {
754 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 754 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2284 fail2: 2284 fail2:
2285 efx_fini_struct(efx); 2285 efx_fini_struct(efx);
2286 fail1: 2286 fail1:
2287 WARN_ON(rc > 0);
2287 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); 2288 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2288 free_netdev(net_dev); 2289 free_netdev(net_dev);
2289 return rc; 2290 return rc;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 17afcd26e870..9d009c46e962 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -925,6 +925,7 @@ static int falcon_probe_port(struct efx_nic *efx)
925 925
926static void falcon_remove_port(struct efx_nic *efx) 926static void falcon_remove_port(struct efx_nic *efx)
927{ 927{
928 efx->phy_op->remove(efx);
928 efx_nic_free_buffer(efx, &efx->stats_buffer); 929 efx_nic_free_buffer(efx, &efx->stats_buffer);
929} 930}
930 931
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 3da933f8f079..8ccab2c67a20 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK); 111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
112} 112}
113 113
114/* Get status of XAUI link */ 114static bool falcon_xgxs_link_ok(struct efx_nic *efx)
115static bool falcon_xaui_link_ok(struct efx_nic *efx)
116{ 115{
117 efx_oword_t reg; 116 efx_oword_t reg;
118 bool align_done, link_ok = false; 117 bool align_done, link_ok = false;
119 int sync_status; 118 int sync_status;
120 119
121 if (LOOPBACK_INTERNAL(efx))
122 return true;
123
124 /* Read link status */ 120 /* Read link status */
125 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); 121 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
126 122
@@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
135 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); 131 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
136 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); 132 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
137 133
138 /* If the link is up, then check the phy side of the xaui link */
139 if (efx->link_state.up && link_ok)
140 if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
141 link_ok = efx_mdio_phyxgxs_lane_sync(efx);
142
143 return link_ok; 134 return link_ok;
144} 135}
145 136
137static bool falcon_xmac_link_ok(struct efx_nic *efx)
138{
139 /*
140 * Check MAC's XGXS link status except when using XGMII loopback
141 * which bypasses the XGXS block.
142 * If possible, check PHY's XGXS link status except when using
143 * MAC loopback.
144 */
145 return (efx->loopback_mode == LOOPBACK_XGMII ||
146 falcon_xgxs_link_ok(efx)) &&
147 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
148 LOOPBACK_INTERNAL(efx) ||
149 efx_mdio_phyxgxs_lane_sync(efx));
150}
151
146void falcon_reconfigure_xmac_core(struct efx_nic *efx) 152void falcon_reconfigure_xmac_core(struct efx_nic *efx)
147{ 153{
148 unsigned int max_frame_len; 154 unsigned int max_frame_len;
@@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
245 251
246 252
247/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ 253/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
248static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries) 254static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
249{ 255{
250 bool mac_up = falcon_xaui_link_ok(efx); 256 bool mac_up = falcon_xmac_link_ok(efx);
251 257
252 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || 258 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
253 efx_phy_mode_disabled(efx->phy_mode)) 259 efx_phy_mode_disabled(efx->phy_mode))
@@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
261 falcon_reset_xaui(efx); 267 falcon_reset_xaui(efx);
262 udelay(200); 268 udelay(200);
263 269
264 mac_up = falcon_xaui_link_ok(efx); 270 mac_up = falcon_xmac_link_ok(efx);
265 --tries; 271 --tries;
266 } 272 }
267 273
@@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
272 278
273static bool falcon_xmac_check_fault(struct efx_nic *efx) 279static bool falcon_xmac_check_fault(struct efx_nic *efx)
274{ 280{
275 return !falcon_check_xaui_link_up(efx, 5); 281 return !falcon_xmac_link_ok_retry(efx, 5);
276} 282}
277 283
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 284static int falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
284 290
285 falcon_reconfigure_mac_wrapper(efx); 291 falcon_reconfigure_mac_wrapper(efx);
286 292
287 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5); 293 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
288 falcon_mask_status_intr(efx, true); 294 falcon_mask_status_intr(efx, true);
289 295
290 return 0; 296 return 0;
@@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
357 return; 363 return;
358 364
359 falcon_mask_status_intr(efx, false); 365 falcon_mask_status_intr(efx, false);
360 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1); 366 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
361 falcon_mask_status_intr(efx, true); 367 falcon_mask_status_intr(efx, true);
362} 368}
363 369
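The falcon_xmac.c changes split the XGXS check out of the link test and rename the bring-up helper to falcon_xmac_link_ok_retry(), which polls the link and resets the XAUI block between attempts. Below is a minimal sketch of that bounded retry-with-recovery pattern in plain C; check() and recover() are placeholders, not driver functions.

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool retry_link_ok(bool (*check)(void), void (*recover)(void), int tries)
    {
        bool up = check();

        while (!up && tries) {
            recover();                  /* e.g. reset the XAUI block */
            usleep(200);                /* brief settle time between polls */
            up = check();
            --tries;
        }
        return up;
    }

    static int attempts;
    static bool fake_check(void)   { return ++attempts >= 3; }
    static void fake_recover(void) { }

    int main(void)
    {
        printf("link %s after %d checks\n",
               retry_link_ok(fake_check, fake_recover, 5) ? "up" : "down",
               attempts);
        return 0;
    }
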
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 683353b904c7..9f035b9f0350 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -142,8 +142,9 @@ static int efx_mcdi_poll(struct efx_nic *efx)
142 if (spins != 0) { 142 if (spins != 0) {
143 --spins; 143 --spins;
144 udelay(1); 144 udelay(1);
145 } else 145 } else {
146 schedule(); 146 schedule_timeout_uninterruptible(1);
147 }
147 148
148 time = get_seconds(); 149 time = get_seconds();
149 150
@@ -803,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
803 loff_t offset, u8 *buffer, size_t length) 804 loff_t offset, u8 *buffer, size_t length)
804{ 805{
805 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; 806 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
806 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)]; 807 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
807 size_t outlen; 808 size_t outlen;
808 int rc; 809 int rc;
809 810
@@ -827,7 +828,7 @@ fail:
827int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 828int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
828 loff_t offset, const u8 *buffer, size_t length) 829 loff_t offset, const u8 *buffer, size_t length)
829{ 830{
830 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)]; 831 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
831 int rc; 832 int rc;
832 833
833 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); 834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
@@ -837,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
837 838
838 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); 839 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
839 840
840 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf), 841 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
842 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
841 NULL, 0, NULL); 843 NULL, 0, NULL);
842 if (rc) 844 if (rc)
843 goto fail; 845 goto fail;
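
Two things change in mcdi.c: the poll loop now yields with schedule_timeout_uninterruptible(1) instead of schedule(), which can return immediately while the task stays runnable, and the variable-length on-stack MCDI buffers are replaced by buffers sized to the new EFX_MCDI_NVRAM_LEN_MAX bound, with callers splitting requests into chunks no larger than that. A minimal sketch of the chunking side (plain C, placeholder I/O) follows.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define NVRAM_LEN_MAX 128           /* mirrors EFX_MCDI_NVRAM_LEN_MAX */

    /* Placeholder for the real per-chunk MCDI call. */
    static int read_chunk(size_t off, unsigned char *buf, size_t len)
    {
        memset(buf, (unsigned char)off, len);
        return 0;
    }

    static int read_all(size_t start, size_t end, unsigned char *out)
    {
        size_t off = start;

        while (off < end) {
            size_t chunk = end - off;

            if (chunk > NVRAM_LEN_MAX)
                chunk = NVRAM_LEN_MAX;
            if (read_chunk(off, out + (off - start), chunk))
                return -1;
            off += chunk;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char buf[300];

        if (read_all(0, sizeof(buf), buf) == 0)
            printf("read %zu bytes in <=%d-byte chunks\n",
                   sizeof(buf), NVRAM_LEN_MAX);
        return 0;
    }
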
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index de916728c2e3..10ce98f4c0fb 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -111,6 +111,7 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
111extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 111extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
112 loff_t offset, const u8 *buffer, 112 loff_t offset, const u8 *buffer,
113 size_t length); 113 size_t length);
114#define EFX_MCDI_NVRAM_LEN_MAX 128
114extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, 115extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
115 loff_t offset, size_t length); 116 loff_t offset, size_t length);
116extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, 117extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 2a85360a46f0..73e71f420624 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1090,8 +1090,10 @@
1090#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 1090#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
1091#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 1091#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
1092#define MC_CMD_MAC_RX_MATCH_FAULT 59 1092#define MC_CMD_MAC_RX_MATCH_FAULT 59
1093#define MC_CMD_GMAC_DMABUF_START 64
1094#define MC_CMD_GMAC_DMABUF_END 95
1093/* Insert new members here. */ 1095/* Insert new members here. */
1094#define MC_CMD_MAC_GENERATION_END 60 1096#define MC_CMD_MAC_GENERATION_END 96
1095#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) 1097#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
1096 1098
1097/* MC_CMD_MAC_STATS: 1099/* MC_CMD_MAC_STATS:
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e1bcc5a0d52..eb694af7a473 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
304 304
305static int efx_mcdi_phy_probe(struct efx_nic *efx) 305static int efx_mcdi_phy_probe(struct efx_nic *efx)
306{ 306{
307 struct efx_mcdi_phy_cfg *phy_cfg; 307 struct efx_mcdi_phy_cfg *phy_data;
308 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
309 u32 caps;
308 int rc; 310 int rc;
309 311
310 /* TODO: Move phy_data initialisation to 312 /* Initialise and populate phy_data */
311 * phy_op->probe/remove, rather than init/fini */ 313 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
312 phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL); 314 if (phy_data == NULL)
313 if (phy_cfg == NULL) { 315 return -ENOMEM;
314 rc = -ENOMEM; 316
315 goto fail_alloc; 317 rc = efx_mcdi_get_phy_cfg(efx, phy_data);
316 }
317 rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
318 if (rc != 0) 318 if (rc != 0)
319 goto fail; 319 goto fail;
320 320
321 efx->phy_type = phy_cfg->type; 321 /* Read initial link advertisement */
322 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
323 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
324 outbuf, sizeof(outbuf), NULL);
325 if (rc)
326 goto fail;
327
328 /* Fill out nic state */
329 efx->phy_data = phy_data;
330 efx->phy_type = phy_data->type;
322 331
323 efx->mdio_bus = phy_cfg->channel; 332 efx->mdio_bus = phy_data->channel;
324 efx->mdio.prtad = phy_cfg->port; 333 efx->mdio.prtad = phy_data->port;
325 efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); 334 efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
326 efx->mdio.mode_support = 0; 335 efx->mdio.mode_support = 0;
327 if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) 336 if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
328 efx->mdio.mode_support |= MDIO_SUPPORTS_C22; 337 efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
329 if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) 338 if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
330 efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 339 efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
331 340
341 caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
342 if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
343 efx->link_advertising =
344 mcdi_to_ethtool_cap(phy_data->media, caps);
345 else
346 phy_data->forced_cap = caps;
347
332 /* Assert that we can map efx -> mcdi loopback modes */ 348 /* Assert that we can map efx -> mcdi loopback modes */
333 BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); 349 BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
334 BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); 350 BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@@ -365,46 +381,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
365 * but by convention we don't */ 381 * but by convention we don't */
366 efx->loopback_modes &= ~(1 << LOOPBACK_NONE); 382 efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
367 383
368 kfree(phy_cfg);
369
370 return 0;
371
372fail:
373 kfree(phy_cfg);
374fail_alloc:
375 return rc;
376}
377
378static int efx_mcdi_phy_init(struct efx_nic *efx)
379{
380 struct efx_mcdi_phy_cfg *phy_data;
381 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
382 u32 caps;
383 int rc;
384
385 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
386 if (phy_data == NULL)
387 return -ENOMEM;
388
389 rc = efx_mcdi_get_phy_cfg(efx, phy_data);
390 if (rc != 0)
391 goto fail;
392
393 efx->phy_data = phy_data;
394
395 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
396 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
397 outbuf, sizeof(outbuf), NULL);
398 if (rc)
399 goto fail;
400
401 caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
402 if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
403 efx->link_advertising =
404 mcdi_to_ethtool_cap(phy_data->media, caps);
405 else
406 phy_data->forced_cap = caps;
407
408 return 0; 384 return 0;
409 385
410fail: 386fail:
@@ -504,7 +480,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
504 return !efx_link_state_equal(&efx->link_state, &old_state); 480 return !efx_link_state_equal(&efx->link_state, &old_state);
505} 481}
506 482
507static void efx_mcdi_phy_fini(struct efx_nic *efx) 483static void efx_mcdi_phy_remove(struct efx_nic *efx)
508{ 484{
509 struct efx_mcdi_phy_data *phy_data = efx->phy_data; 485 struct efx_mcdi_phy_data *phy_data = efx->phy_data;
510 486
@@ -586,10 +562,11 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
586 562
587struct efx_phy_operations efx_mcdi_phy_ops = { 563struct efx_phy_operations efx_mcdi_phy_ops = {
588 .probe = efx_mcdi_phy_probe, 564 .probe = efx_mcdi_phy_probe,
589 .init = efx_mcdi_phy_init, 565 .init = efx_port_dummy_op_int,
590 .reconfigure = efx_mcdi_phy_reconfigure, 566 .reconfigure = efx_mcdi_phy_reconfigure,
591 .poll = efx_mcdi_phy_poll, 567 .poll = efx_mcdi_phy_poll,
592 .fini = efx_mcdi_phy_fini, 568 .fini = efx_port_dummy_op_void,
569 .remove = efx_mcdi_phy_remove,
593 .get_settings = efx_mcdi_phy_get_settings, 570 .get_settings = efx_mcdi_phy_get_settings,
594 .set_settings = efx_mcdi_phy_set_settings, 571 .set_settings = efx_mcdi_phy_set_settings,
595 .run_tests = NULL, 572 .run_tests = NULL,
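
In mcdi_phy.c the one-time work (allocating phy_data, reading the PHY configuration and initial link advertisement) moves from the init hook, which runs on every interface up, into probe, with a matching remove hook to free it; the now-empty init/fini slots point at what appear to be no-op stubs (efx_port_dummy_op_int/void). A minimal sketch of that probe/remove versus init/fini split, with made-up names, follows.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical PHY state and ops table; names do not match the driver. */
    struct phy {
        void *data;
    };

    struct phy_ops {
        int  (*probe)(struct phy *);    /* once, allocates state */
        int  (*init)(struct phy *);     /* every ifup */
        void (*fini)(struct phy *);     /* every ifdown */
        void (*remove)(struct phy *);   /* once, frees state */
    };

    static int  dummy_op_int(struct phy *p)  { (void)p; return 0; }
    static void dummy_op_void(struct phy *p) { (void)p; }

    static int my_probe(struct phy *p)
    {
        p->data = calloc(1, 64);        /* one-time allocation */
        return p->data ? 0 : -1;
    }

    static void my_remove(struct phy *p)
    {
        free(p->data);                  /* paired with probe, not fini */
        p->data = NULL;
    }

    static const struct phy_ops ops = {
        .probe  = my_probe,
        .init   = dummy_op_int,         /* nothing left to do per ifup */
        .fini   = dummy_op_void,
        .remove = my_remove,
    };

    int main(void)
    {
        struct phy p = { 0 };

        if (ops.probe(&p))
            return 1;
        ops.init(&p);
        ops.fini(&p);
        ops.remove(&p);
        puts("probe/init/fini/remove lifecycle ok");
        return 0;
    }
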
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 3a464529a46b..407bbaddfea6 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -23,7 +23,6 @@
23#include "mcdi_pcol.h" 23#include "mcdi_pcol.h"
24 24
25#define EFX_SPI_VERIFY_BUF_LEN 16 25#define EFX_SPI_VERIFY_BUF_LEN 16
26#define EFX_MCDI_CHUNK_LEN 128
27 26
28struct efx_mtd_partition { 27struct efx_mtd_partition {
29 struct mtd_info mtd; 28 struct mtd_info mtd;
@@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
428 int rc = 0; 427 int rc = 0;
429 428
430 while (offset < end) { 429 while (offset < end) {
431 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 430 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
432 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, 431 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
433 buffer, chunk); 432 buffer, chunk);
434 if (rc) 433 if (rc)
@@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
491 } 490 }
492 491
493 while (offset < end) { 492 while (offset < end) {
494 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 493 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
495 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, 494 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
496 buffer, chunk); 495 buffer, chunk);
497 if (rc) 496 if (rc)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 34c381f009b7..d5aab5b3fa06 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -524,6 +524,7 @@ struct efx_phy_operations {
524 int (*probe) (struct efx_nic *efx); 524 int (*probe) (struct efx_nic *efx);
525 int (*init) (struct efx_nic *efx); 525 int (*init) (struct efx_nic *efx);
526 void (*fini) (struct efx_nic *efx); 526 void (*fini) (struct efx_nic *efx);
527 void (*remove) (struct efx_nic *efx);
527 int (*reconfigure) (struct efx_nic *efx); 528 int (*reconfigure) (struct efx_nic *efx);
528 bool (*poll) (struct efx_nic *efx); 529 bool (*poll) (struct efx_nic *efx);
529 void (*get_settings) (struct efx_nic *efx, 530 void (*get_settings) (struct efx_nic *efx,
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index a577be227862..db44224ed2ca 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1576,6 +1576,8 @@ void efx_nic_init_common(struct efx_nic *efx)
1576 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1576 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1577 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1577 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1578 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1578 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1579 /* Disable hardware watchdog which can misfire */
1580 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1579 /* Squash TX of packets of 16 bytes or less */ 1581 /* Squash TX of packets of 16 bytes or less */
1580 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1582 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1581 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1583 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
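
The nic.c hunk maxes out the TX prefetch watchdog timer field so the watchdog cannot misfire. EFX_SET_OWORD_FIELD() is, in effect, a read-modify-write of one bitfield inside a wider register value; a small sketch with made-up field geometry is shown below.

    #include <assert.h>
    #include <stdint.h>

    #define FIELD_LBN   8               /* hypothetical low bit number */
    #define FIELD_WIDTH 22              /* hypothetical width, max value 0x3fffff */

    static uint64_t set_field(uint64_t reg, uint64_t val)
    {
        uint64_t mask = ((1ULL << FIELD_WIDTH) - 1) << FIELD_LBN;

        return (reg & ~mask) | ((val << FIELD_LBN) & mask);
    }

    int main(void)
    {
        assert(set_field(0, 0x3fffff) == (0x3fffffULL << FIELD_LBN));
        return 0;
    }
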
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 3800fc791b2f..67eec7a6e487 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -33,6 +33,9 @@
33#define PCS_FW_HEARTBEAT_REG 0xd7ee 33#define PCS_FW_HEARTBEAT_REG 0xd7ee
34#define PCS_FW_HEARTB_LBN 0 34#define PCS_FW_HEARTB_LBN 0
35#define PCS_FW_HEARTB_WIDTH 8 35#define PCS_FW_HEARTB_WIDTH 8
36#define PCS_FW_PRODUCT_CODE_1 0xd7f0
37#define PCS_FW_VERSION_1 0xd7f3
38#define PCS_FW_BUILD_1 0xd7f6
36#define PCS_UC8051_STATUS_REG 0xd7fd 39#define PCS_UC8051_STATUS_REG 0xd7fd
37#define PCS_UC_STATUS_LBN 0 40#define PCS_UC_STATUS_LBN 0
38#define PCS_UC_STATUS_WIDTH 8 41#define PCS_UC_STATUS_WIDTH 8
@@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
52 55
53struct qt202x_phy_data { 56struct qt202x_phy_data {
54 enum efx_phy_mode phy_mode; 57 enum efx_phy_mode phy_mode;
58 bool bug17190_in_bad_state;
59 unsigned long bug17190_timer;
60 u32 firmware_ver;
55}; 61};
56 62
57#define QT2022C2_MAX_RESET_TIME 500 63#define QT2022C2_MAX_RESET_TIME 500
58#define QT2022C2_RESET_WAIT 10 64#define QT2022C2_RESET_WAIT 10
59 65
60static int qt2025c_wait_reset(struct efx_nic *efx) 66#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
67#define QT2025C_HEARTB_WAIT 100
68#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
69#define QT2025C_FWSTART_WAIT 100
70
71#define BUG17190_INTERVAL (2 * HZ)
72
73static int qt2025c_wait_heartbeat(struct efx_nic *efx)
61{ 74{
62 unsigned long timeout = jiffies + 10 * HZ; 75 unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
63 int reg, old_counter = 0; 76 int reg, old_counter = 0;
64 77
65 /* Wait for firmware heartbeat to start */ 78 /* Wait for firmware heartbeat to start */
@@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
74 old_counter = counter; 87 old_counter = counter;
75 else if (counter != old_counter) 88 else if (counter != old_counter)
76 break; 89 break;
77 if (time_after(jiffies, timeout)) 90 if (time_after(jiffies, timeout)) {
91 /* Some cables have EEPROMs that conflict with the
92 * PHY's on-board EEPROM so it cannot load firmware */
93 EFX_ERR(efx, "If an SFP+ direct attach cable is"
94 " connected, please check that it complies"
95 " with the SFP+ specification\n");
78 return -ETIMEDOUT; 96 return -ETIMEDOUT;
79 msleep(10); 97 }
98 msleep(QT2025C_HEARTB_WAIT);
80 } 99 }
81 100
101 return 0;
102}
103
104static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
105{
106 unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
107 int reg;
108
82 /* Wait for firmware status to look good */ 109 /* Wait for firmware status to look good */
83 for (;;) { 110 for (;;) {
84 reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); 111 reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
90 break; 117 break;
91 if (time_after(jiffies, timeout)) 118 if (time_after(jiffies, timeout))
92 return -ETIMEDOUT; 119 return -ETIMEDOUT;
120 msleep(QT2025C_FWSTART_WAIT);
121 }
122
123 return 0;
124}
125
126static void qt2025c_restart_firmware(struct efx_nic *efx)
127{
128 /* Restart microcontroller execution of firmware from RAM */
129 efx_mdio_write(efx, 3, 0xe854, 0x00c0);
130 efx_mdio_write(efx, 3, 0xe854, 0x0040);
131 msleep(50);
132}
133
134static int qt2025c_wait_reset(struct efx_nic *efx)
135{
136 int rc;
137
138 rc = qt2025c_wait_heartbeat(efx);
139 if (rc != 0)
140 return rc;
141
142 rc = qt2025c_wait_fw_status_good(efx);
143 if (rc == -ETIMEDOUT) {
144 /* Bug 17689: occasionally heartbeat starts but firmware status
145 * code never progresses beyond 0x00. Try again, once, after
146 * restarting execution of the firmware image. */
147 EFX_LOG(efx, "bashing QT2025C microcontroller\n");
148 qt2025c_restart_firmware(efx);
149 rc = qt2025c_wait_heartbeat(efx);
150 if (rc != 0)
151 return rc;
152 rc = qt2025c_wait_fw_status_good(efx);
153 }
154
155 return rc;
156}
157
158static void qt2025c_firmware_id(struct efx_nic *efx)
159{
160 struct qt202x_phy_data *phy_data = efx->phy_data;
161 u8 firmware_id[9];
162 size_t i;
163
164 for (i = 0; i < sizeof(firmware_id); i++)
165 firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
166 PCS_FW_PRODUCT_CODE_1 + i);
167 EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
168 (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
169 firmware_id[3] >> 4, firmware_id[3] & 0xf,
170 firmware_id[4], firmware_id[5],
171 firmware_id[6], firmware_id[7], firmware_id[8]);
172 phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
173 ((firmware_id[3] & 0x0f) << 16) |
174 (firmware_id[4] << 8) | firmware_id[5];
175}
176
177static void qt2025c_bug17190_workaround(struct efx_nic *efx)
178{
179 struct qt202x_phy_data *phy_data = efx->phy_data;
180
181 /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
182 * layers up, but PCS down (no block_lock). If we notice this state
183 * persisting for a couple of seconds, we switch PMA/PMD loopback
184 * briefly on and then off again, which is normally sufficient to
185 * recover it.
186 */
187 if (efx->link_state.up ||
188 !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
189 phy_data->bug17190_in_bad_state = false;
190 return;
191 }
192
193 if (!phy_data->bug17190_in_bad_state) {
194 phy_data->bug17190_in_bad_state = true;
195 phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
196 return;
197 }
198
199 if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
200 EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
201 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
202 MDIO_PMA_CTRL1_LOOPBACK, true);
93 msleep(100); 203 msleep(100);
204 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
205 MDIO_PMA_CTRL1_LOOPBACK, false);
206 phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
207 }
208}
209
210static int qt2025c_select_phy_mode(struct efx_nic *efx)
211{
212 struct qt202x_phy_data *phy_data = efx->phy_data;
213 struct falcon_board *board = falcon_board(efx);
214 int reg, rc, i;
215 uint16_t phy_op_mode;
216
217 /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
218 * Self-Configure mode. Don't attempt any switching if we encounter
219 * older firmware. */
220 if (phy_data->firmware_ver < 0x02000100)
221 return 0;
222
223 /* In general we will get optimal behaviour in "SFP+ Self-Configure"
224 * mode; however, that powers down most of the PHY when no module is
225 * present, so we must use a different mode (any fixed mode will do)
226 * to be sure that loopbacks will work. */
227 phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
228
229 /* Only change mode if really necessary */
230 reg = efx_mdio_read(efx, 1, 0xc319);
231 if ((reg & 0x0038) == phy_op_mode)
232 return 0;
233 EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
234
235 /* This sequence replicates the register writes configured in the boot
236 * EEPROM (including the differences between board revisions), except
237 * that the operating mode is changed, and the PHY is prevented from
238 * unnecessarily reloading the main firmware image again. */
239 efx_mdio_write(efx, 1, 0xc300, 0x0000);
240 /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
241 * STOPs onto the firmware/module I2C bus to reset it, varies across
242 * board revisions, as the bus is connected to different GPIO/LED
243 * outputs on the PHY.) */
244 if (board->major == 0 && board->minor < 2) {
245 efx_mdio_write(efx, 1, 0xc303, 0x4498);
246 for (i = 0; i < 9; i++) {
247 efx_mdio_write(efx, 1, 0xc303, 0x4488);
248 efx_mdio_write(efx, 1, 0xc303, 0x4480);
249 efx_mdio_write(efx, 1, 0xc303, 0x4490);
250 efx_mdio_write(efx, 1, 0xc303, 0x4498);
251 }
252 } else {
253 efx_mdio_write(efx, 1, 0xc303, 0x0920);
254 efx_mdio_write(efx, 1, 0xd008, 0x0004);
255 for (i = 0; i < 9; i++) {
256 efx_mdio_write(efx, 1, 0xc303, 0x0900);
257 efx_mdio_write(efx, 1, 0xd008, 0x0005);
258 efx_mdio_write(efx, 1, 0xc303, 0x0920);
259 efx_mdio_write(efx, 1, 0xd008, 0x0004);
260 }
261 efx_mdio_write(efx, 1, 0xc303, 0x4900);
262 }
263 efx_mdio_write(efx, 1, 0xc303, 0x4900);
264 efx_mdio_write(efx, 1, 0xc302, 0x0004);
265 efx_mdio_write(efx, 1, 0xc316, 0x0013);
266 efx_mdio_write(efx, 1, 0xc318, 0x0054);
267 efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
268 efx_mdio_write(efx, 1, 0xc31a, 0x0098);
269 efx_mdio_write(efx, 3, 0x0026, 0x0e00);
270 efx_mdio_write(efx, 3, 0x0027, 0x0013);
271 efx_mdio_write(efx, 3, 0x0028, 0xa528);
272 efx_mdio_write(efx, 1, 0xd006, 0x000a);
273 efx_mdio_write(efx, 1, 0xd007, 0x0009);
274 efx_mdio_write(efx, 1, 0xd008, 0x0004);
275 /* This additional write is not present in the boot EEPROM. It
276 * prevents the PHY's internal boot ROM doing another pointless (and
277 * slow) reload of the firmware image (the microcontroller's code
278 * memory is not affected by the microcontroller reset). */
279 efx_mdio_write(efx, 1, 0xc317, 0x00ff);
280 efx_mdio_write(efx, 1, 0xc300, 0x0002);
281 msleep(20);
282
283 /* Restart microcontroller execution of firmware from RAM */
284 qt2025c_restart_firmware(efx);
285
286 /* Wait for the microcontroller to be ready again */
287 rc = qt2025c_wait_reset(efx);
288 if (rc < 0) {
289 EFX_ERR(efx, "PHY microcontroller reset during mode switch "
290 "timed out\n");
291 return rc;
94 } 292 }
95 293
96 return 0; 294 return 0;
@@ -120,15 +318,9 @@ static int qt202x_reset_phy(struct efx_nic *efx)
120 /* Wait 250ms for the PHY to complete bootup */ 318 /* Wait 250ms for the PHY to complete bootup */
121 msleep(250); 319 msleep(250);
122 320
123 /* Check that all the MMDs we expect are present and responding. We
124 * expect faults on some if the link is down, but not on the PHY XS */
125 rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
126 if (rc < 0)
127 goto fail;
128
129 falcon_board(efx)->type->init_phy(efx); 321 falcon_board(efx)->type->init_phy(efx);
130 322
131 return rc; 323 return 0;
132 324
133 fail: 325 fail:
134 EFX_ERR(efx, "PHY reset timed out\n"); 326 EFX_ERR(efx, "PHY reset timed out\n");
@@ -137,6 +329,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
137 329
138static int qt202x_phy_probe(struct efx_nic *efx) 330static int qt202x_phy_probe(struct efx_nic *efx)
139{ 331{
332 struct qt202x_phy_data *phy_data;
333
334 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
335 if (!phy_data)
336 return -ENOMEM;
337 efx->phy_data = phy_data;
338 phy_data->phy_mode = efx->phy_mode;
339 phy_data->bug17190_in_bad_state = false;
340 phy_data->bug17190_timer = 0;
341
140 efx->mdio.mmds = QT202X_REQUIRED_DEVS; 342 efx->mdio.mmds = QT202X_REQUIRED_DEVS;
141 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 343 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
142 efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; 344 efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@@ -145,7 +347,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
145 347
146static int qt202x_phy_init(struct efx_nic *efx) 348static int qt202x_phy_init(struct efx_nic *efx)
147{ 349{
148 struct qt202x_phy_data *phy_data;
149 u32 devid; 350 u32 devid;
150 int rc; 351 int rc;
151 352
@@ -155,17 +356,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
155 return rc; 356 return rc;
156 } 357 }
157 358
158 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
159 if (!phy_data)
160 return -ENOMEM;
161 efx->phy_data = phy_data;
162
163 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 359 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
164 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", 360 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
165 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), 361 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
166 efx_mdio_id_rev(devid)); 362 efx_mdio_id_rev(devid));
167 363
168 phy_data->phy_mode = efx->phy_mode; 364 if (efx->phy_type == PHY_TYPE_QT2025C)
365 qt2025c_firmware_id(efx);
366
169 return 0; 367 return 0;
170} 368}
171 369
@@ -183,6 +381,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
183 efx->link_state.fd = true; 381 efx->link_state.fd = true;
184 efx->link_state.fc = efx->wanted_fc; 382 efx->link_state.fc = efx->wanted_fc;
185 383
384 if (efx->phy_type == PHY_TYPE_QT2025C)
385 qt2025c_bug17190_workaround(efx);
386
186 return efx->link_state.up != was_up; 387 return efx->link_state.up != was_up;
187} 388}
188 389
@@ -191,6 +392,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
191 struct qt202x_phy_data *phy_data = efx->phy_data; 392 struct qt202x_phy_data *phy_data = efx->phy_data;
192 393
193 if (efx->phy_type == PHY_TYPE_QT2025C) { 394 if (efx->phy_type == PHY_TYPE_QT2025C) {
395 int rc = qt2025c_select_phy_mode(efx);
396 if (rc)
397 return rc;
398
194 /* There are several different register bits which can 399 /* There are several different register bits which can
195 * disable TX (and save power) on direct-attach cables 400 * disable TX (and save power) on direct-attach cables
196 * or optical transceivers, varying somewhat between 401 * or optical transceivers, varying somewhat between
@@ -224,7 +429,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
224 mdio45_ethtool_gset(&efx->mdio, ecmd); 429 mdio45_ethtool_gset(&efx->mdio, ecmd);
225} 430}
226 431
227static void qt202x_phy_fini(struct efx_nic *efx) 432static void qt202x_phy_remove(struct efx_nic *efx)
228{ 433{
229 /* Free the context block */ 434 /* Free the context block */
230 kfree(efx->phy_data); 435 kfree(efx->phy_data);
@@ -236,7 +441,8 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
236 .init = qt202x_phy_init, 441 .init = qt202x_phy_init,
237 .reconfigure = qt202x_phy_reconfigure, 442 .reconfigure = qt202x_phy_reconfigure,
238 .poll = qt202x_phy_poll, 443 .poll = qt202x_phy_poll,
239 .fini = qt202x_phy_fini, 444 .fini = efx_port_dummy_op_void,
445 .remove = qt202x_phy_remove,
240 .get_settings = qt202x_phy_get_settings, 446 .get_settings = qt202x_phy_get_settings,
241 .set_settings = efx_mdio_set_settings, 447 .set_settings = efx_mdio_set_settings,
242}; 448};
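
The largest qt202x_phy.c addition is the bug17190 workaround, a small timer-driven recovery: note when the bad state (PMA/PMD and PHY XS up but no link) is first seen, and only pulse PMA/PMD loopback once it has persisted for the whole interval. A minimal sketch of that stuck-state pattern, using wall-clock time instead of jiffies and placeholder hooks, follows.

    #include <stdbool.h>
    #include <time.h>

    #define RECOVER_INTERVAL 2          /* seconds, mirrors 2 * HZ */

    struct stuck_detect {
        bool in_bad_state;
        time_t deadline;
    };

    static void poll_stuck(struct stuck_detect *sd, bool bad_now,
                           void (*kick)(void))
    {
        time_t now = time(NULL);

        if (!bad_now) {
            sd->in_bad_state = false;   /* state cleared, disarm */
            return;
        }
        if (!sd->in_bad_state) {
            sd->in_bad_state = true;    /* first sighting: arm the timer */
            sd->deadline = now + RECOVER_INTERVAL;
            return;
        }
        if (now >= sd->deadline) {
            kick();                     /* e.g. pulse PMA/PMD loopback */
            sd->deadline = now + RECOVER_INTERVAL;
        }
    }

    static void noop_kick(void) { }

    int main(void)
    {
        struct stuck_detect sd = { 0 };

        poll_stuck(&sd, true, noop_kick);   /* arm */
        poll_stuck(&sd, false, noop_kick);  /* disarm */
        return 0;
    }
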
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index af3933579790..250c8827b842 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -79,10 +79,14 @@ struct efx_loopback_state {
79static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests) 79static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
80{ 80{
81 int rc = 0; 81 int rc = 0;
82 int devad = __ffs(efx->mdio.mmds); 82 int devad;
83 u16 physid1, physid2; 83 u16 physid1, physid2;
84 84
85 if (efx->phy_type == PHY_TYPE_NONE) 85 if (efx->mdio.mode_support & MDIO_SUPPORTS_C45)
86 devad = __ffs(efx->mdio.mmds);
87 else if (efx->mdio.mode_support & MDIO_SUPPORTS_C22)
88 devad = MDIO_DEVAD_NONE;
89 else
86 return 0; 90 return 0;
87 91
88 mutex_lock(&efx->mac_lock); 92 mutex_lock(&efx->mac_lock);
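
The selftest change picks the MDIO address for the PHY ID read from the advertised MDIO mode: Clause 45 devices are probed at the first present MMD (__ffs of the MMD mask), Clause 22 devices at MDIO_DEVAD_NONE, and the test is skipped otherwise. A small sketch of the lowest-set-bit lookup is below (plain C, not the kernel's __ffs).

    #include <stdio.h>

    static int first_set_bit(unsigned int mask)
    {
        int bit = 0;

        if (!mask)
            return -1;                  /* nothing present */
        while (!(mask & 1u)) {
            mask >>= 1;
            bit++;
        }
        return bit;
    }

    int main(void)
    {
        /* e.g. PMA/PMD (1) and PCS (3) present -> probe device 1 first */
        printf("first MMD: %d\n", first_set_bit((1u << 1) | (1u << 3)));
        return 0;
    }
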
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index de07a4f031b2..f8c6771e66d8 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -133,6 +133,7 @@ static int siena_probe_port(struct efx_nic *efx)
133 133
134void siena_remove_port(struct efx_nic *efx) 134void siena_remove_port(struct efx_nic *efx)
135{ 135{
136 efx->phy_op->remove(efx);
136 efx_nic_free_buffer(efx, &efx->stats_buffer); 137 efx_nic_free_buffer(efx, &efx->stats_buffer);
137} 138}
138 139
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index ca11572a49a9..3009c297c135 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
202 int rc; 202 int rc;
203 203
204 rtnl_lock(); 204 rtnl_lock();
205 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, 205 if (efx->state != STATE_RUNNING) {
206 MDIO_PMA_10GBT_TXPWR_SHORT, 206 rc = -EBUSY;
207 count != 0 && *buf != '0'); 207 } else {
208 rc = efx_reconfigure_port(efx); 208 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
209 MDIO_PMA_10GBT_TXPWR_SHORT,
210 count != 0 && *buf != '0');
211 rc = efx_reconfigure_port(efx);
212 }
209 rtnl_unlock(); 213 rtnl_unlock();
210 214
211 return rc < 0 ? rc : (ssize_t)count; 215 return rc < 0 ? rc : (ssize_t)count;
@@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx)
298 return 0; 302 return 0;
299} 303}
300 304
301static int sfx7101_phy_probe(struct efx_nic *efx) 305static int tenxpress_phy_probe(struct efx_nic *efx)
302{ 306{
303 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; 307 struct tenxpress_phy_data *phy_data;
304 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 308 int rc;
305 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; 309
306 return 0; 310 /* Allocate phy private storage */
307} 311 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
312 if (!phy_data)
313 return -ENOMEM;
314 efx->phy_data = phy_data;
315 phy_data->phy_mode = efx->phy_mode;
316
317 /* Create any special files */
318 if (efx->phy_type == PHY_TYPE_SFT9001B) {
319 rc = device_create_file(&efx->pci_dev->dev,
320 &dev_attr_phy_short_reach);
321 if (rc)
322 goto fail;
323 }
324
325 if (efx->phy_type == PHY_TYPE_SFX7101) {
326 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
327 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
328
329 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
330
331 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
332 ADVERTISED_10000baseT_Full);
333 } else {
334 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
335 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
336
337 efx->loopback_modes = (SFT9001_LOOPBACKS |
338 FALCON_XMAC_LOOPBACKS |
339 FALCON_GMAC_LOOPBACKS);
340
341 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
342 ADVERTISED_10000baseT_Full |
343 ADVERTISED_1000baseT_Full |
344 ADVERTISED_100baseT_Full);
345 }
308 346
309static int sft9001_phy_probe(struct efx_nic *efx)
310{
311 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
312 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
313 efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
314 FALCON_GMAC_LOOPBACKS);
315 return 0; 347 return 0;
348
349fail:
350 kfree(efx->phy_data);
351 efx->phy_data = NULL;
352 return rc;
316} 353}
317 354
318static int tenxpress_phy_init(struct efx_nic *efx) 355static int tenxpress_phy_init(struct efx_nic *efx)
319{ 356{
320 struct tenxpress_phy_data *phy_data; 357 int rc;
321 int rc = 0;
322 358
323 falcon_board(efx)->type->init_phy(efx); 359 falcon_board(efx)->type->init_phy(efx);
324 360
325 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
326 if (!phy_data)
327 return -ENOMEM;
328 efx->phy_data = phy_data;
329 phy_data->phy_mode = efx->phy_mode;
330
331 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { 361 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
332 if (efx->phy_type == PHY_TYPE_SFT9001A) { 362 if (efx->phy_type == PHY_TYPE_SFT9001A) {
333 int reg; 363 int reg;
@@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
341 371
342 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); 372 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
343 if (rc < 0) 373 if (rc < 0)
344 goto fail; 374 return rc;
345 375
346 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 376 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
347 if (rc < 0) 377 if (rc < 0)
348 goto fail; 378 return rc;
349 } 379 }
350 380
351 rc = tenxpress_init(efx); 381 rc = tenxpress_init(efx);
352 if (rc < 0) 382 if (rc < 0)
353 goto fail; 383 return rc;
354 384
355 /* Initialise advertising flags */ 385 /* Reinitialise flow control settings */
356 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
357 ADVERTISED_10000baseT_Full);
358 if (efx->phy_type != PHY_TYPE_SFX7101)
359 efx->link_advertising |= (ADVERTISED_1000baseT_Full |
360 ADVERTISED_100baseT_Full);
361 efx_link_set_wanted_fc(efx, efx->wanted_fc); 386 efx_link_set_wanted_fc(efx, efx->wanted_fc);
362 efx_mdio_an_reconfigure(efx); 387 efx_mdio_an_reconfigure(efx);
363 388
364 if (efx->phy_type == PHY_TYPE_SFT9001B) {
365 rc = device_create_file(&efx->pci_dev->dev,
366 &dev_attr_phy_short_reach);
367 if (rc)
368 goto fail;
369 }
370
371 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ 389 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
372 390
373 /* Let XGXS and SerDes out of reset */ 391 /* Let XGXS and SerDes out of reset */
374 falcon_reset_xaui(efx); 392 falcon_reset_xaui(efx);
375 393
376 return 0; 394 return 0;
377
378 fail:
379 kfree(efx->phy_data);
380 efx->phy_data = NULL;
381 return rc;
382} 395}
383 396
384/* Perform a "special software reset" on the PHY. The caller is 397/* Perform a "special software reset" on the PHY. The caller is
@@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
589 return !efx_link_state_equal(&efx->link_state, &old_state); 602 return !efx_link_state_equal(&efx->link_state, &old_state);
590} 603}
591 604
592static void tenxpress_phy_fini(struct efx_nic *efx) 605static void sfx7101_phy_fini(struct efx_nic *efx)
593{ 606{
594 int reg; 607 int reg;
595 608
609 /* Power down the LNPGA */
610 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
611 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
612
613 /* Waiting here ensures that the board fini, which can turn
614 * off the power to the PHY, won't get run until the LNPGA
615 * powerdown has been given long enough to complete. */
616 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
617}
618
619static void tenxpress_phy_remove(struct efx_nic *efx)
620{
596 if (efx->phy_type == PHY_TYPE_SFT9001B) 621 if (efx->phy_type == PHY_TYPE_SFT9001B)
597 device_remove_file(&efx->pci_dev->dev, 622 device_remove_file(&efx->pci_dev->dev,
598 &dev_attr_phy_short_reach); 623 &dev_attr_phy_short_reach);
599 624
600 if (efx->phy_type == PHY_TYPE_SFX7101) {
601 /* Power down the LNPGA */
602 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
603 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
604
605 /* Waiting here ensures that the board fini, which can turn
606 * off the power to the PHY, won't get run until the LNPGA
607 * powerdown has been given long enough to complete. */
608 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
609 }
610
611 kfree(efx->phy_data); 625 kfree(efx->phy_data);
612 efx->phy_data = NULL; 626 efx->phy_data = NULL;
613} 627}
@@ -819,11 +833,12 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
819} 833}
820 834
821struct efx_phy_operations falcon_sfx7101_phy_ops = { 835struct efx_phy_operations falcon_sfx7101_phy_ops = {
822 .probe = sfx7101_phy_probe, 836 .probe = tenxpress_phy_probe,
823 .init = tenxpress_phy_init, 837 .init = tenxpress_phy_init,
824 .reconfigure = tenxpress_phy_reconfigure, 838 .reconfigure = tenxpress_phy_reconfigure,
825 .poll = tenxpress_phy_poll, 839 .poll = tenxpress_phy_poll,
826 .fini = tenxpress_phy_fini, 840 .fini = sfx7101_phy_fini,
841 .remove = tenxpress_phy_remove,
827 .get_settings = tenxpress_get_settings, 842 .get_settings = tenxpress_get_settings,
828 .set_settings = tenxpress_set_settings, 843 .set_settings = tenxpress_set_settings,
829 .set_npage_adv = sfx7101_set_npage_adv, 844 .set_npage_adv = sfx7101_set_npage_adv,
@@ -832,11 +847,12 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
832}; 847};
833 848
834struct efx_phy_operations falcon_sft9001_phy_ops = { 849struct efx_phy_operations falcon_sft9001_phy_ops = {
835 .probe = sft9001_phy_probe, 850 .probe = tenxpress_phy_probe,
836 .init = tenxpress_phy_init, 851 .init = tenxpress_phy_init,
837 .reconfigure = tenxpress_phy_reconfigure, 852 .reconfigure = tenxpress_phy_reconfigure,
838 .poll = tenxpress_phy_poll, 853 .poll = tenxpress_phy_poll,
839 .fini = tenxpress_phy_fini, 854 .fini = efx_port_dummy_op_void,
855 .remove = tenxpress_phy_remove,
840 .get_settings = tenxpress_get_settings, 856 .get_settings = tenxpress_get_settings,
841 .set_settings = tenxpress_set_settings, 857 .set_settings = tenxpress_set_settings,
842 .set_npage_adv = sft9001_set_npage_adv, 858 .set_npage_adv = sft9001_set_npage_adv,
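
tenxpress.c folds the two per-PHY probe functions into one tenxpress_phy_probe() that branches on phy_type, and moves the one-time work (phy_data allocation, the phy_short_reach sysfs attribute, and the initial advertising mask) out of init so it is not redone on every ifup. A minimal sketch of deriving the advertising mask per PHY type follows; the flag values are illustrative, not the real ADVERTISED_* bits.

    #include <stdint.h>
    #include <stdio.h>

    #define ADV_TP       0x01
    #define ADV_AUTONEG  0x02
    #define ADV_10000    0x04
    #define ADV_1000     0x08
    #define ADV_100      0x10

    enum phy_type { SFX7101, SFT9001 };

    static uint32_t advertising_for(enum phy_type type)
    {
        uint32_t adv = ADV_TP | ADV_AUTONEG | ADV_10000;

        if (type != SFX7101)            /* SFT9001 also does 1G/100M */
            adv |= ADV_1000 | ADV_100;
        return adv;
    }

    int main(void)
    {
        printf("SFX7101 adv=%#x SFT9001 adv=%#x\n",
               (unsigned)advertising_for(SFX7101),
               (unsigned)advertising_for(SFT9001));
        return 0;
    }
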
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e669f94e821b..a8b70ef6d817 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
821 EFX_TXQ_MASK]; 821 EFX_TXQ_MASK];
822 efx_tsoh_free(tx_queue, buffer); 822 efx_tsoh_free(tx_queue, buffer);
823 EFX_BUG_ON_PARANOID(buffer->skb); 823 EFX_BUG_ON_PARANOID(buffer->skb);
824 buffer->len = 0;
825 buffer->continuation = true;
826 if (buffer->unmap_len) { 824 if (buffer->unmap_len) {
827 unmap_addr = (buffer->dma_addr + buffer->len - 825 unmap_addr = (buffer->dma_addr + buffer->len -
828 buffer->unmap_len); 826 buffer->unmap_len);
@@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
836 PCI_DMA_TODEVICE); 834 PCI_DMA_TODEVICE);
837 buffer->unmap_len = 0; 835 buffer->unmap_len = 0;
838 } 836 }
837 buffer->len = 0;
838 buffer->continuation = true;
839 } 839 }
840} 840}
841 841
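The tx.c fix is purely about ordering: the unwind path computed the unmap address from buffer->len after len had already been cleared, so the unmap could target the wrong address. The patch clears len and continuation only after the unmap. A self-contained sketch of the corrected ordering is below.

    #include <assert.h>
    #include <stdint.h>

    struct buf {
        uint64_t dma_addr;
        unsigned int len;
        unsigned int unmap_len;
    };

    static uint64_t unwind(struct buf *b)
    {
        uint64_t unmap_addr = 0;

        if (b->unmap_len) {
            /* correct: use len before clearing it */
            unmap_addr = b->dma_addr + b->len - b->unmap_len;
            b->unmap_len = 0;
        }
        b->len = 0;                     /* cleared only after the unmap */
        return unmap_addr;
    }

    int main(void)
    {
        struct buf b = { .dma_addr = 0x1000, .len = 256, .unmap_len = 256 };

        assert(unwind(&b) == 0x1000);   /* clearing len first would give 0xf00 */
        return 0;
    }
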
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index ca6285016dfd..7402b858cab7 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -110,7 +110,7 @@ static void sh_eth_reset(struct net_device *ndev)
110 mdelay(1); 110 mdelay(1);
111 cnt--; 111 cnt--;
112 } 112 }
113 if (cnt < 0) 113 if (cnt == 0)
114 printk(KERN_ERR "Device reset fail\n"); 114 printk(KERN_ERR "Device reset fail\n");
115 115
116 /* Table Init */ 116 /* Table Init */
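
The sh_eth change fixes an unreachable timeout check: the reset loop counts cnt down and stops at zero, so cnt can never be negative afterwards and the old `if (cnt < 0)` could never report a failed reset. A small sketch of the corrected countdown-and-check pattern follows.

    #include <stdbool.h>
    #include <stdio.h>

    static bool wait_ready(bool (*ready)(void), int tries)
    {
        int cnt = tries;

        while (cnt > 0) {
            if (ready())
                break;
            cnt--;                      /* never goes below 0 */
        }
        return cnt != 0;                /* cnt == 0 means timeout */
    }

    static bool never_ready(void) { return false; }

    int main(void)
    {
        if (!wait_ready(never_ready, 100))
            fprintf(stderr, "Device reset fail\n");
        return 0;
    }
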
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 1c01b96c9611..67249c3c9f50 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -644,6 +644,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
644{ 644{
645 u32 reg1; 645 u32 reg1;
646 646
647 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
647 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 648 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
648 reg1 &= ~phy_power[port]; 649 reg1 &= ~phy_power[port];
649 650
@@ -651,6 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
651 reg1 |= coma_mode[port]; 652 reg1 |= coma_mode[port];
652 653
653 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 654 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
655 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
654 sky2_pci_read32(hw, PCI_DEV_REG1); 656 sky2_pci_read32(hw, PCI_DEV_REG1);
655 657
656 if (hw->chip_id == CHIP_ID_YUKON_FE) 658 if (hw->chip_id == CHIP_ID_YUKON_FE)
@@ -707,9 +709,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
707 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); 709 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
708 } 710 }
709 711
712 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
710 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 713 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
711 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ 714 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
712 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 715 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
716 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
713} 717}
714 718
715/* Force a renegotiation */ 719/* Force a renegotiation */
@@ -1021,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
1021static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) 1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1022{ 1026{
1023 struct sky2_tx_le *le = sky2->tx_le + *slot; 1027 struct sky2_tx_le *le = sky2->tx_le + *slot;
1024 struct tx_ring_info *re = sky2->tx_ring + *slot;
1025 1028
1026 *slot = RING_NEXT(*slot, sky2->tx_ring_size); 1029 *slot = RING_NEXT(*slot, sky2->tx_ring_size);
1027 re->flags = 0;
1028 re->skb = NULL;
1029 le->ctrl = 0; 1030 le->ctrl = 0;
1030 return le; 1031 return le;
1031} 1032}
@@ -1618,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1618 return count; 1619 return count;
1619} 1620}
1620 1621
1621static void sky2_tx_unmap(struct pci_dev *pdev, 1622static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1622 const struct tx_ring_info *re)
1623{ 1623{
1624 if (re->flags & TX_MAP_SINGLE) 1624 if (re->flags & TX_MAP_SINGLE)
1625 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), 1625 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1629,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
1629 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1629 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1630 pci_unmap_len(re, maplen), 1630 pci_unmap_len(re, maplen),
1631 PCI_DMA_TODEVICE); 1631 PCI_DMA_TODEVICE);
1632 re->flags = 0;
1632} 1633}
1633 1634
1634/* 1635/*
@@ -1835,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1835 dev->stats.tx_packets++; 1836 dev->stats.tx_packets++;
1836 dev->stats.tx_bytes += skb->len; 1837 dev->stats.tx_bytes += skb->len;
1837 1838
1839 re->skb = NULL;
1838 dev_kfree_skb_any(skb); 1840 dev_kfree_skb_any(skb);
1839 1841
1840 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); 1842 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
@@ -1844,7 +1846,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1844 sky2->tx_cons = idx; 1846 sky2->tx_cons = idx;
1845 smp_mb(); 1847 smp_mb();
1846 1848
1847 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) 1849 /* Wake unless it's detached, and called e.g. from sky2_down() */
1850 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
1848 netif_wake_queue(dev); 1851 netif_wake_queue(dev);
1849} 1852}
1850 1853
@@ -2148,7 +2151,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
2148 2151
2149 /* reset PHY Link Detect */ 2152 /* reset PHY Link Detect */
2150 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); 2153 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2154 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2151 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); 2155 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2156 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2152 2157
2153 sky2_link_up(sky2); 2158 sky2_link_up(sky2);
2154} 2159}
@@ -2639,6 +2644,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2639 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 2644 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2640 u16 pci_err; 2645 u16 pci_err;
2641 2646
2647 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2642 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2648 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2643 if (net_ratelimit()) 2649 if (net_ratelimit())
2644 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", 2650 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2646,12 +2652,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2646 2652
2647 sky2_pci_write16(hw, PCI_STATUS, 2653 sky2_pci_write16(hw, PCI_STATUS,
2648 pci_err | PCI_STATUS_ERROR_BITS); 2654 pci_err | PCI_STATUS_ERROR_BITS);
2655 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2649 } 2656 }
2650 2657
2651 if (status & Y2_IS_PCI_EXP) { 2658 if (status & Y2_IS_PCI_EXP) {
2652 /* PCI-Express uncorrectable Error occurred */ 2659 /* PCI-Express uncorrectable Error occurred */
2653 u32 err; 2660 u32 err;
2654 2661
2662 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2655 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2663 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2656 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 2664 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2657 0xfffffffful); 2665 0xfffffffful);
@@ -2659,6 +2667,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2659 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); 2667 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2660 2668
2661 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2669 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2670 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2662 } 2671 }
2663 2672
2664 if (status & Y2_HWE_L1_MASK) 2673 if (status & Y2_HWE_L1_MASK)
@@ -3037,6 +3046,7 @@ static void sky2_reset(struct sky2_hw *hw)
3037 } 3046 }
3038 3047
3039 sky2_power_on(hw); 3048 sky2_power_on(hw);
3049 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3040 3050
3041 for (i = 0; i < hw->ports; i++) { 3051 for (i = 0; i < hw->ports; i++) {
3042 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 3052 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -3073,6 +3083,7 @@ static void sky2_reset(struct sky2_hw *hw)
3073 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; 3083 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3074 3084
3075 /* reset PHY Link Detect */ 3085 /* reset PHY Link Detect */
3086 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3076 sky2_pci_write16(hw, PSM_CONFIG_REG4, 3087 sky2_pci_write16(hw, PSM_CONFIG_REG4,
3077 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); 3088 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
3078 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); 3089 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
@@ -3090,6 +3101,7 @@ static void sky2_reset(struct sky2_hw *hw)
3090 /* restore the PCIe Link Control register */ 3101 /* restore the PCIe Link Control register */
3091 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); 3102 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3092 } 3103 }
3104 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3093 3105
3094 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ 3106 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3095 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); 3107 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
@@ -3227,6 +3239,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3227 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; 3239 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3228} 3240}
3229 3241
3242static void sky2_hw_set_wol(struct sky2_hw *hw)
3243{
3244 int wol = 0;
3245 int i;
3246
3247 for (i = 0; i < hw->ports; i++) {
3248 struct net_device *dev = hw->dev[i];
3249 struct sky2_port *sky2 = netdev_priv(dev);
3250
3251 if (sky2->wol)
3252 wol = 1;
3253 }
3254
3255 if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3256 hw->chip_id == CHIP_ID_YUKON_EX ||
3257 hw->chip_id == CHIP_ID_YUKON_FE_P)
3258 sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3259
3260 device_set_wakeup_enable(&hw->pdev->dev, wol);
3261}
3262
3230static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 3263static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3231{ 3264{
3232 const struct sky2_port *sky2 = netdev_priv(dev); 3265 const struct sky2_port *sky2 = netdev_priv(dev);
@@ -3246,13 +3279,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3246 3279
3247 sky2->wol = wol->wolopts; 3280 sky2->wol = wol->wolopts;
3248 3281
3249 if (hw->chip_id == CHIP_ID_YUKON_EC_U || 3282 sky2_hw_set_wol(hw);
3250 hw->chip_id == CHIP_ID_YUKON_EX ||
3251 hw->chip_id == CHIP_ID_YUKON_FE_P)
3252 sky2_write32(hw, B0_CTST, sky2->wol
3253 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3254
3255 device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
3256 3283
3257 if (!netif_running(dev)) 3284 if (!netif_running(dev))
3258 sky2_wol_init(sky2); 3285 sky2_wol_init(sky2);
@@ -4684,6 +4711,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4684 INIT_WORK(&hw->restart_work, sky2_restart); 4711 INIT_WORK(&hw->restart_work, sky2_restart);
4685 4712
4686 pci_set_drvdata(pdev, hw); 4713 pci_set_drvdata(pdev, hw);
4714 pdev->d3_delay = 150;
4687 4715
4688 return 0; 4716 return 0;
4689 4717
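Most of the sky2 hunks bracket PCI configuration accesses with TST_CFG_WRITE_ON/TST_CFG_WRITE_OFF writes to B2_TST_CTRL1, presumably because these chips only accept config-space writes while that test bit is set; the Wake-on-LAN logic is also hoisted into sky2_hw_set_wol(), which enables hardware WoL if any port requests it. A minimal sketch of the unlock/write/relock bracketing, with placeholder register ops, is below.

    #include <stdint.h>
    #include <stdio.h>

    static void cfg_write_enable(int on)
    {
        printf("TST_CTRL1 <- %s\n", on ? "WRITE_ON" : "WRITE_OFF");
    }

    static void pci_cfg_write(uint32_t reg, uint32_t val)
    {
        printf("cfg[%#x] <- %#x\n", (unsigned)reg, (unsigned)val);
    }

    static void protected_cfg_write(uint32_t reg, uint32_t val)
    {
        cfg_write_enable(1);            /* unlock config space */
        pci_cfg_write(reg, val);
        cfg_write_enable(0);            /* relock immediately after */
    }

    int main(void)
    {
        protected_cfg_write(0x80, 0x1234);  /* e.g. PCI_DEV_REG1 */
        return 0;
    }
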
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 95db60adde41..f9521136a869 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
1063 if (retval) { 1063 if (retval) {
1064 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", 1064 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1065 FIRMWARE_RX); 1065 FIRMWARE_RX);
1066 return retval; 1066 goto out_init;
1067 } 1067 }
1068 if (fw_rx->size % 4) { 1068 if (fw_rx->size % 4) {
1069 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", 1069 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
@@ -1108,6 +1108,9 @@ out_tx:
1108 release_firmware(fw_tx); 1108 release_firmware(fw_tx);
1109out_rx: 1109out_rx:
1110 release_firmware(fw_rx); 1110 release_firmware(fw_rx);
1111out_init:
1112 if (retval)
1113 netdev_close(dev);
1111 return retval; 1114 return retval;
1112} 1115}
1113 1116
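The starfire fix routes the firmware-load failure through a common out_init label so netdev_close() undoes the setup already performed by netdev_open(), instead of returning and leaking it. A small sketch of that single-exit cleanup pattern follows.

    #include <stdio.h>

    static int  setup(void)           { return 0; }
    static void teardown(void)        { puts("teardown"); }
    static int  load_firmware(int ok) { return ok ? 0 : -1; }

    static int open_dev(int fw_ok)
    {
        int retval = setup();

        if (retval)
            return retval;

        retval = load_firmware(fw_ok);
        if (retval)
            goto out_init;              /* do not return directly */
        return 0;

    out_init:
        if (retval)
            teardown();                 /* mirrors netdev_close(dev) */
        return retval;
    }

    int main(void)
    {
        open_dev(0);                    /* firmware load fails, setup is undone */
        return 0;
    }
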
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3a74d2168598..7f82b0238e08 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2009 Broadcom Corporation. 7 * Copyright (C) 2005-2010 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.105" 71#define DRV_MODULE_VERSION "3.106"
72#define DRV_MODULE_RELDATE "December 2, 2009" 72#define DRV_MODULE_RELDATE "January 12, 2010"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -1037,7 +1037,11 @@ static void tg3_mdio_start(struct tg3 *tp)
1037 else 1037 else
1038 tp->phy_addr = 1; 1038 tp->phy_addr = 1;
1039 1039
1040 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; 1040 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1041 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1042 else
1043 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1044 TG3_CPMU_PHY_STRAP_IS_SERDES;
1041 if (is_serdes) 1045 if (is_serdes)
1042 tp->phy_addr += 7; 1046 tp->phy_addr += 7;
1043 } else 1047 } else
@@ -4693,8 +4697,9 @@ next_pkt:
4693 (*post_ptr)++; 4697 (*post_ptr)++;
4694 4698
4695 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 4699 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4696 u32 idx = *post_ptr % TG3_RX_RING_SIZE; 4700 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4697 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx); 4701 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4702 tpr->rx_std_prod_idx);
4698 work_mask &= ~RXD_OPAQUE_RING_STD; 4703 work_mask &= ~RXD_OPAQUE_RING_STD;
4699 rx_std_posted = 0; 4704 rx_std_posted = 0;
4700 } 4705 }
@@ -7742,7 +7747,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7742 ((u64) tpr->rx_std_mapping >> 32)); 7747 ((u64) tpr->rx_std_mapping >> 32));
7743 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7748 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7744 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7749 ((u64) tpr->rx_std_mapping & 0xffffffff));
7745 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) 7750 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7746 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7751 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7747 NIC_SRAM_RX_BUFFER_DESC); 7752 NIC_SRAM_RX_BUFFER_DESC);
7748 7753
@@ -12122,7 +12127,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12122 12127
12123 tp->phy_id = eeprom_phy_id; 12128 tp->phy_id = eeprom_phy_id;
12124 if (eeprom_phy_serdes) { 12129 if (eeprom_phy_serdes) {
12125 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 12130 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
12131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12126 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; 12132 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12127 else 12133 else
12128 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12134 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
@@ -13384,6 +13390,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13384 if (err) 13390 if (err)
13385 return err; 13391 return err;
13386 13392
13393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13394 (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
13395 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
13396 return -ENOTSUPP;
13397
13387 /* Initialize data/descriptor byte/word swapping. */ 13398 /* Initialize data/descriptor byte/word swapping. */
13388 val = tr32(GRC_MODE); 13399 val = tr32(GRC_MODE);
13389 val &= GRC_MODE_HOST_STACKUP; 13400 val &= GRC_MODE_HOST_STACKUP;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cd30889650f8..8a167912902b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,6 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2010 Broadcom Corporation.
7 */ 8 */
8 9
9#ifndef _T3_H 10#ifndef _T3_H
@@ -1054,6 +1055,8 @@
1054#define CPMU_MUTEX_REQ_DRIVER 0x00001000 1055#define CPMU_MUTEX_REQ_DRIVER 0x00001000
1055#define TG3_CPMU_MUTEX_GNT 0x00003660 1056#define TG3_CPMU_MUTEX_GNT 0x00003660
1056#define CPMU_MUTEX_GNT_DRIVER 0x00001000 1057#define CPMU_MUTEX_GNT_DRIVER 0x00001000
1058#define TG3_CPMU_PHY_STRAP 0x00003664
1059#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
1057/* 0x3664 --> 0x3800 unused */ 1060/* 0x3664 --> 0x3800 unused */
1058 1061
1059/* Mbuf cluster free registers */ 1062/* Mbuf cluster free registers */
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 1cc8cf4425d1..516713fa0a05 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -101,6 +101,10 @@ config TULIP_NAPI_HW_MITIGATION
101 101
102 If in doubt, say Y. 102 If in doubt, say Y.
103 103
104config TULIP_DM910X
105 def_bool y
106 depends on TULIP && SPARC
107
104config DE4X5 108config DE4X5
105 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" 109 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
106 depends on PCI || EISA 110 depends on PCI || EISA
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index ad63621913c3..6f44ebf58910 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -92,6 +92,10 @@
92#include <asm/uaccess.h> 92#include <asm/uaccess.h>
93#include <asm/irq.h> 93#include <asm/irq.h>
94 94
95#ifdef CONFIG_TULIP_DM910X
96#include <linux/of.h>
97#endif
98
95 99
96/* Board/System/Debug information/definition ---------------- */ 100/* Board/System/Debug information/definition ---------------- */
97#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */ 101#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
@@ -377,6 +381,23 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
377 if (!printed_version++) 381 if (!printed_version++)
378 printk(version); 382 printk(version);
379 383
384 /*
385 * SPARC on-board DM910x chips should be handled by the main
386 * tulip driver, except for early DM9100s.
387 */
388#ifdef CONFIG_TULIP_DM910X
389 if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
390 ent->driver_data == PCI_DM9102_ID) {
391 struct device_node *dp = pci_device_to_OF_node(pdev);
392
393 if (dp && of_get_property(dp, "local-mac-address", NULL)) {
394 printk(KERN_INFO DRV_NAME
395 ": skipping on-board DM910x (use tulip)\n");
396 return -ENODEV;
397 }
398 }
399#endif
400
380 /* Init network device */ 401 /* Init network device */
381 dev = alloc_etherdev(sizeof(*db)); 402 dev = alloc_etherdev(sizeof(*db));
382 if (dev == NULL) 403 if (dev == NULL)
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0fa3140d65bf..20696b5d60a5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -196,9 +196,13 @@ struct tulip_chip_table tulip_tbl[] = {
196 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task }, 196 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
197 197
198 /* DM910X */ 198 /* DM910X */
199#ifdef CONFIG_TULIP_DM910X
199 { "Davicom DM9102/DM9102A", 128, 0x0001ebef, 200 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
200 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, 201 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
201 tulip_timer, tulip_media_task }, 202 tulip_timer, tulip_media_task },
203#else
204 { NULL },
205#endif
202 206
203 /* RS7112 */ 207 /* RS7112 */
204 { "Conexant LANfinity", 256, 0x0001ebef, 208 { "Conexant LANfinity", 256, 0x0001ebef,
@@ -228,8 +232,10 @@ static struct pci_device_id tulip_pci_tbl[] = {
228 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 232 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 }, 233 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
230 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 }, 234 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
235#ifdef CONFIG_TULIP_DM910X
231 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, 236 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
232 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, 237 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
238#endif
233 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 239 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
234 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 }, 240 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
235 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 241 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
@@ -243,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = {
243 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 249 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ 250 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
245 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ 251 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
252 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
246 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 253 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
247 { } /* terminate list */ 254 { } /* terminate list */
248}; 255};
@@ -1299,18 +1306,30 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1299 } 1306 }
1300 1307
1301 /* 1308 /*
1302 * Early DM9100's need software CRC and the DMFE driver 1309 * DM910x chips should be handled by the dmfe driver, except
1310 * on-board chips on SPARC systems. Also, early DM9100s need
1311 * software CRC which only the dmfe driver supports.
1303 */ 1312 */
1304 1313
1305 if (pdev->vendor == 0x1282 && pdev->device == 0x9100) 1314#ifdef CONFIG_TULIP_DM910X
1306 { 1315 if (chip_idx == DM910X) {
1307 /* Read Chip revision */ 1316 struct device_node *dp;
1308 if (pdev->revision < 0x30) 1317
1309 { 1318 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1310 printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); 1319 pdev->revision < 0x30) {
1320 printk(KERN_INFO PFX
1321 "skipping early DM9100 with Crc bug (use dmfe)\n");
1322 return -ENODEV;
1323 }
1324
1325 dp = pci_device_to_OF_node(pdev);
1326 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1327 printk(KERN_INFO PFX
1328 "skipping DM910x expansion card (use dmfe)\n");
1311 return -ENODEV; 1329 return -ENODEV;
1312 } 1330 }
1313 } 1331 }
1332#endif
1314 1333
1315 /* 1334 /*
1316 * Looks for early PCI chipsets where people report hangs 1335 * Looks for early PCI chipsets where people report hangs
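Taken together, the dmfe and tulip hunks above split DM910x ownership using the same firmware test: dmfe declines devices whose OpenFirmware node carries "local-mac-address" (on-board SPARC parts, driven by tulip), while tulip declines the ones that lack it (add-in cards, driven by dmfe). A hedged sketch of the shared check, with a hypothetical helper name and assuming the SPARC OF accessors used in the patch:

#include <linux/pci.h>
#include <linux/of.h>

/* Hypothetical helper, not in the patch: true when the PCI device has an
 * OpenFirmware node with a "local-mac-address" property, i.e. an on-board
 * SPARC DM910x.  dmfe returns -ENODEV for these, tulip returns -ENODEV for
 * everything else, so each chip is claimed by exactly one driver. */
static bool dm910x_is_onboard(struct pci_dev *pdev)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);

	return dp && of_get_property(dp, "local-mac-address", NULL) != NULL;
}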
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 01e99f22210e..2834a01bae24 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -849,13 +849,13 @@ static void tun_sock_write_space(struct sock *sk)
849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
850 wake_up_interruptible_sync(sk->sk_sleep); 850 wake_up_interruptible_sync(sk->sk_sleep);
851 851
852 tun = container_of(sk, struct tun_sock, sk)->tun; 852 tun = tun_sk(sk)->tun;
853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
854} 854}
855 855
856static void tun_sock_destruct(struct sock *sk) 856static void tun_sock_destruct(struct sock *sk)
857{ 857{
858 free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev); 858 free_netdev(tun_sk(sk)->tun->dev);
859} 859}
860 860
861static struct proto tun_proto = { 861static struct proto tun_proto = {
@@ -990,7 +990,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
990 sk->sk_write_space = tun_sock_write_space; 990 sk->sk_write_space = tun_sock_write_space;
991 sk->sk_sndbuf = INT_MAX; 991 sk->sk_sndbuf = INT_MAX;
992 992
993 container_of(sk, struct tun_sock, sk)->tun = tun; 993 tun_sk(sk)->tun = tun;
994 994
995 security_tun_dev_post_create(sk); 995 security_tun_dev_post_create(sk);
996 996
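The open-coded container_of() casts are replaced with a tun_sk() accessor that is not visible in these hunks; given how it is used above, it is presumably the usual inline wrapper, roughly as follows (member types inferred from the existing tun.c declarations):

struct tun_sock {
	struct sock		sk;
	struct tun_struct	*tun;
};

/* Assumed definition, not shown in this diff: recover the enclosing
 * tun_sock from its embedded struct sock. */
static inline struct tun_sock *tun_sk(struct sock *sk)
{
	return container_of(sk, struct tun_sock, sk);
}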
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index afaf088b72ea..eb8fe7e16c6c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1563 1563
1564static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1564static void ugeth_quiesce(struct ucc_geth_private *ugeth)
1565{ 1565{
1566 /* Wait for and prevent any further xmits. */ 1566 /* Prevent any further xmits, plus detach the device. */
1567 netif_device_detach(ugeth->ndev);
1568
1569 /* Wait for any current xmits to finish. */
1567 netif_tx_disable(ugeth->ndev); 1570 netif_tx_disable(ugeth->ndev);
1568 1571
1569 /* Disable the interrupt to avoid NAPI rescheduling. */ 1572 /* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
1577{ 1580{
1578 napi_enable(&ugeth->napi); 1581 napi_enable(&ugeth->napi);
1579 enable_irq(ugeth->ug_info->uf_info.irq); 1582 enable_irq(ugeth->ug_info->uf_info.irq);
1580 netif_tx_wake_all_queues(ugeth->ndev); 1583 netif_device_attach(ugeth->ndev);
1581} 1584}
1582 1585
1583/* Called every time the controller might need to be made 1586/* Called every time the controller might need to be made
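The reworked quiesce path detaches the net device instead of merely disabling its TX queues, so the stack (including the TX watchdog) leaves the interface alone while the MAC is reprogrammed; ugeth_activate() then re-attaches it. A condensed sketch of that bracket with hypothetical names and a placeholder for the actual register writes:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	int irq;
};

/* Sketch only: quiesce, reconfigure, reactivate. */
static void foo_reconfigure(struct foo_priv *priv)
{
	netif_device_detach(priv->ndev);	/* stack stops using the device */
	netif_tx_disable(priv->ndev);		/* wait out in-flight transmits */
	disable_irq(priv->irq);
	napi_disable(&priv->napi);

	/* ... rewrite the MAC/PHY-dependent registers here ... */

	napi_enable(&priv->napi);
	enable_irq(priv->irq);
	netif_device_attach(priv->ndev);	/* also restarts the TX queues */
}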
@@ -1648,25 +1651,28 @@ static void adjust_link(struct net_device *dev)
1648 ugeth->oldspeed = phydev->speed; 1651 ugeth->oldspeed = phydev->speed;
1649 } 1652 }
1650 1653
1651 /*
1652 * To change the MAC configuration we need to disable the
1653 * controller. To do so, we have to either grab ugeth->lock,
1654 * which is a bad idea since 'graceful stop' commands might
1655 * take quite a while, or we can quiesce driver's activity.
1656 */
1657 ugeth_quiesce(ugeth);
1658 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
1659
1660 out_be32(&ug_regs->maccfg2, tempval);
1661 out_be32(&uf_regs->upsmr, upsmr);
1662
1663 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
1664 ugeth_activate(ugeth);
1665
1666 if (!ugeth->oldlink) { 1654 if (!ugeth->oldlink) {
1667 new_state = 1; 1655 new_state = 1;
1668 ugeth->oldlink = 1; 1656 ugeth->oldlink = 1;
1669 } 1657 }
1658
1659 if (new_state) {
1660 /*
1661 * To change the MAC configuration we need to disable
1662 * the controller. To do so, we have to either grab
1663 * ugeth->lock, which is a bad idea since 'graceful
1664 * stop' commands might take quite a while, or we can
1665 * quiesce driver's activity.
1666 */
1667 ugeth_quiesce(ugeth);
1668 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
1669
1670 out_be32(&ug_regs->maccfg2, tempval);
1671 out_be32(&uf_regs->upsmr, upsmr);
1672
1673 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
1674 ugeth_activate(ugeth);
1675 }
1670 } else if (ugeth->oldlink) { 1676 } else if (ugeth->oldlink) {
1671 new_state = 1; 1677 new_state = 1;
1672 ugeth->oldlink = 0; 1678 ugeth->oldlink = 0;
@@ -3273,13 +3279,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3273 /* Handle the transmitted buffer and release */ 3279 /* Handle the transmitted buffer and release */
3274 /* the BD to be used with the current frame */ 3280 /* the BD to be used with the current frame */
3275 3281
3276 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) 3282 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3283 if (!skb)
3277 break; 3284 break;
3278 3285
3279 dev->stats.tx_packets++; 3286 dev->stats.tx_packets++;
3280 3287
3281 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3282
3283 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3288 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
3284 skb_recycle_check(skb, 3289 skb_recycle_check(skb,
3285 ugeth->ug_info->uf_info.max_rx_buf_length + 3290 ugeth->ug_info->uf_info.max_rx_buf_length +
@@ -3601,6 +3606,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
3601 if (!netif_running(ndev)) 3606 if (!netif_running(ndev))
3602 return 0; 3607 return 0;
3603 3608
3609 netif_device_detach(ndev);
3604 napi_disable(&ugeth->napi); 3610 napi_disable(&ugeth->napi);
3605 3611
3606 /* 3612 /*
@@ -3659,7 +3665,7 @@ static int ucc_geth_resume(struct of_device *ofdev)
3659 phy_start(ugeth->phydev); 3665 phy_start(ugeth->phydev);
3660 3666
3661 napi_enable(&ugeth->napi); 3667 napi_enable(&ugeth->napi);
3662 netif_start_queue(ndev); 3668 netif_device_attach(ndev);
3663 3669
3664 return 0; 3670 return 0;
3665} 3671}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index a007e2acf651..ef1fbeb11c6e 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -838,13 +838,13 @@ struct ucc_geth_hardware_statistics {
838 using the maximum is 838 using the maximum is
839 easier */ 839 easier */
840#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 840#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
841#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */ 841#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
842#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ 842#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
843#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ 843#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
844#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64 844#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
845#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ 845#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
846#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ 846#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
847#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This 847#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
848 is a 848 is a
849 guess 849 guess
850 */ 850 */
@@ -899,16 +899,17 @@ struct ucc_geth_hardware_statistics {
899#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size 899#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
900 */ 900 */
901#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ 901#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
902#define UCC_GETH_UTFTT_INIT 128 902#define UCC_GETH_UTFTT_INIT 512
903/* Gigabit Ethernet (1000 Mbps) */ 903/* Gigabit Ethernet (1000 Mbps) */
904#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual 904#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
905 FIFO size */ 905 FIFO size */
906#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ 906#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
907#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ 907#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
908#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
909 FIFO size */
910#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
911#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
908#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
909 FIFO size */
910#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
911#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual
912 FIFO size */
912 913
913#define UCC_GETH_REMODER_INIT 0 /* bits that must be 914#define UCC_GETH_REMODER_INIT 0 /* bits that must be
914 set */ 915 set */
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e183a83b99..4f27f022fbf7 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -419,7 +419,7 @@ static int cdc_manage_power(struct usbnet *dev, int on)
419 419
420static const struct driver_info cdc_info = { 420static const struct driver_info cdc_info = {
421 .description = "CDC Ethernet Device", 421 .description = "CDC Ethernet Device",
422 .flags = FLAG_ETHER | FLAG_LINK_INTR, 422 .flags = FLAG_ETHER,
423 // .check_connect = cdc_check_connect, 423 // .check_connect = cdc_check_connect,
424 .bind = cdc_bind, 424 .bind = cdc_bind,
425 .unbind = usbnet_cdc_unbind, 425 .unbind = usbnet_cdc_unbind,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f78f0903b073..6895f1531238 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -286,6 +286,7 @@ struct hso_device {
286 u8 usb_gone; 286 u8 usb_gone;
287 struct work_struct async_get_intf; 287 struct work_struct async_get_intf;
288 struct work_struct async_put_intf; 288 struct work_struct async_put_intf;
289 struct work_struct reset_device;
289 290
290 struct usb_device *usb; 291 struct usb_device *usb;
291 struct usb_interface *interface; 292 struct usb_interface *interface;
@@ -332,7 +333,8 @@ static void hso_kick_transmit(struct hso_serial *serial);
332/* Helper functions */ 333/* Helper functions */
333static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, 334static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
334 struct usb_device *usb, gfp_t gfp); 335 struct usb_device *usb, gfp_t gfp);
335static void log_usb_status(int status, const char *function); 336static void handle_usb_error(int status, const char *function,
337 struct hso_device *hso_dev);
336static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, 338static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
337 int type, int dir); 339 int type, int dir);
338static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports); 340static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports);
@@ -350,6 +352,7 @@ static void async_put_intf(struct work_struct *data);
350static int hso_put_activity(struct hso_device *hso_dev); 352static int hso_put_activity(struct hso_device *hso_dev);
351static int hso_get_activity(struct hso_device *hso_dev); 353static int hso_get_activity(struct hso_device *hso_dev);
352static void tiocmget_intr_callback(struct urb *urb); 354static void tiocmget_intr_callback(struct urb *urb);
355static void reset_device(struct work_struct *data);
353/*****************************************************************************/ 356/*****************************************************************************/
354/* Helping functions */ 357/* Helping functions */
355/*****************************************************************************/ 358/*****************************************************************************/
@@ -461,10 +464,17 @@ static const struct usb_device_id hso_ids[] = {
461 {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ 464 {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */
462 {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ 465 {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */
463 {USB_DEVICE(0x0af0, 0x7701)}, 466 {USB_DEVICE(0x0af0, 0x7701)},
467 {USB_DEVICE(0x0af0, 0x7706)},
464 {USB_DEVICE(0x0af0, 0x7801)}, 468 {USB_DEVICE(0x0af0, 0x7801)},
465 {USB_DEVICE(0x0af0, 0x7901)}, 469 {USB_DEVICE(0x0af0, 0x7901)},
470 {USB_DEVICE(0x0af0, 0x7A01)},
471 {USB_DEVICE(0x0af0, 0x7A05)},
466 {USB_DEVICE(0x0af0, 0x8200)}, 472 {USB_DEVICE(0x0af0, 0x8200)},
467 {USB_DEVICE(0x0af0, 0x8201)}, 473 {USB_DEVICE(0x0af0, 0x8201)},
474 {USB_DEVICE(0x0af0, 0x8300)},
475 {USB_DEVICE(0x0af0, 0x8302)},
476 {USB_DEVICE(0x0af0, 0x8304)},
477 {USB_DEVICE(0x0af0, 0x8400)},
468 {USB_DEVICE(0x0af0, 0xd035)}, 478 {USB_DEVICE(0x0af0, 0xd035)},
469 {USB_DEVICE(0x0af0, 0xd055)}, 479 {USB_DEVICE(0x0af0, 0xd055)},
470 {USB_DEVICE(0x0af0, 0xd155)}, 480 {USB_DEVICE(0x0af0, 0xd155)},
@@ -473,6 +483,8 @@ static const struct usb_device_id hso_ids[] = {
473 {USB_DEVICE(0x0af0, 0xd157)}, 483 {USB_DEVICE(0x0af0, 0xd157)},
474 {USB_DEVICE(0x0af0, 0xd257)}, 484 {USB_DEVICE(0x0af0, 0xd257)},
475 {USB_DEVICE(0x0af0, 0xd357)}, 485 {USB_DEVICE(0x0af0, 0xd357)},
486 {USB_DEVICE(0x0af0, 0xd058)},
487 {USB_DEVICE(0x0af0, 0xc100)},
476 {} 488 {}
477}; 489};
478MODULE_DEVICE_TABLE(usb, hso_ids); 490MODULE_DEVICE_TABLE(usb, hso_ids);
@@ -655,8 +667,8 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial)
655 spin_unlock_irqrestore(&serial_table_lock, flags); 667 spin_unlock_irqrestore(&serial_table_lock, flags);
656} 668}
657 669
658/* log a meaningful explanation of an USB status */ 670static void handle_usb_error(int status, const char *function,
659static void log_usb_status(int status, const char *function) 671 struct hso_device *hso_dev)
660{ 672{
661 char *explanation; 673 char *explanation;
662 674
@@ -685,10 +697,20 @@ static void log_usb_status(int status, const char *function)
685 case -EMSGSIZE: 697 case -EMSGSIZE:
686 explanation = "internal error"; 698 explanation = "internal error";
687 break; 699 break;
700 case -EILSEQ:
701 case -EPROTO:
702 case -ETIME:
703 case -ETIMEDOUT:
704 explanation = "protocol error";
705 if (hso_dev)
706 schedule_work(&hso_dev->reset_device);
707 break;
688 default: 708 default:
689 explanation = "unknown status"; 709 explanation = "unknown status";
690 break; 710 break;
691 } 711 }
712
713 /* log a meaningful explanation of an USB status */
692 D1("%s: received USB status - %s (%d)", function, explanation, status); 714 D1("%s: received USB status - %s (%d)", function, explanation, status);
693} 715}
694 716
@@ -762,7 +784,7 @@ static void write_bulk_callback(struct urb *urb)
762 /* log status, but don't act on it, we don't need to resubmit anything 784 /* log status, but don't act on it, we don't need to resubmit anything
763 * anyhow */ 785 * anyhow */
764 if (status) 786 if (status)
765 log_usb_status(status, __func__); 787 handle_usb_error(status, __func__, odev->parent);
766 788
767 hso_put_activity(odev->parent); 789 hso_put_activity(odev->parent);
768 790
@@ -806,7 +828,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
806 result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC); 828 result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC);
807 if (result) { 829 if (result) {
808 dev_warn(&odev->parent->interface->dev, 830 dev_warn(&odev->parent->interface->dev,
809 "failed mux_bulk_tx_urb %d", result); 831 "failed mux_bulk_tx_urb %d\n", result);
810 net->stats.tx_errors++; 832 net->stats.tx_errors++;
811 netif_start_queue(net); 833 netif_start_queue(net);
812 } else { 834 } else {
@@ -998,7 +1020,7 @@ static void read_bulk_callback(struct urb *urb)
998 1020
999 /* is al ok? (Filip: Who's Al ?) */ 1021 /* is al ok? (Filip: Who's Al ?) */
1000 if (status) { 1022 if (status) {
1001 log_usb_status(status, __func__); 1023 handle_usb_error(status, __func__, odev->parent);
1002 return; 1024 return;
1003 } 1025 }
1004 1026
@@ -1019,7 +1041,8 @@ static void read_bulk_callback(struct urb *urb)
1019 if (odev->parent->port_spec & HSO_INFO_CRC_BUG) { 1041 if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
1020 u32 rest; 1042 u32 rest;
1021 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; 1043 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1022 rest = urb->actual_length % odev->in_endp->wMaxPacketSize; 1044 rest = urb->actual_length %
1045 le16_to_cpu(odev->in_endp->wMaxPacketSize);
1023 if (((rest == 5) || (rest == 6)) && 1046 if (((rest == 5) || (rest == 6)) &&
1024 !memcmp(((u8 *) urb->transfer_buffer) + 1047 !memcmp(((u8 *) urb->transfer_buffer) +
1025 urb->actual_length - 4, crc_check, 4)) { 1048 urb->actual_length - 4, crc_check, 4)) {
@@ -1053,7 +1076,7 @@ static void read_bulk_callback(struct urb *urb)
1053 result = usb_submit_urb(urb, GFP_ATOMIC); 1076 result = usb_submit_urb(urb, GFP_ATOMIC);
1054 if (result) 1077 if (result)
1055 dev_warn(&odev->parent->interface->dev, 1078 dev_warn(&odev->parent->interface->dev,
1056 "%s failed submit mux_bulk_rx_urb %d", __func__, 1079 "%s failed submit mux_bulk_rx_urb %d\n", __func__,
1057 result); 1080 result);
1058} 1081}
1059 1082
@@ -1207,7 +1230,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
1207 D1("serial == NULL"); 1230 D1("serial == NULL");
1208 return; 1231 return;
1209 } else if (status) { 1232 } else if (status) {
1210 log_usb_status(status, __func__); 1233 handle_usb_error(status, __func__, serial->parent);
1211 return; 1234 return;
1212 } 1235 }
1213 1236
@@ -1225,7 +1248,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
1225 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; 1248 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1226 rest = 1249 rest =
1227 urb->actual_length % 1250 urb->actual_length %
1228 serial->in_endp->wMaxPacketSize; 1251 le16_to_cpu(serial->in_endp->wMaxPacketSize);
1229 if (((rest == 5) || (rest == 6)) && 1252 if (((rest == 5) || (rest == 6)) &&
1230 !memcmp(((u8 *) urb->transfer_buffer) + 1253 !memcmp(((u8 *) urb->transfer_buffer) +
1231 urb->actual_length - 4, crc_check, 4)) { 1254 urb->actual_length - 4, crc_check, 4)) {
@@ -1513,7 +1536,7 @@ static void tiocmget_intr_callback(struct urb *urb)
1513 if (!serial) 1536 if (!serial)
1514 return; 1537 return;
1515 if (status) { 1538 if (status) {
1516 log_usb_status(status, __func__); 1539 handle_usb_error(status, __func__, serial->parent);
1517 return; 1540 return;
1518 } 1541 }
1519 tiocmget = serial->tiocmget; 1542 tiocmget = serial->tiocmget;
@@ -1700,6 +1723,10 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
1700 D1("no tty structures"); 1723 D1("no tty structures");
1701 return -EINVAL; 1724 return -EINVAL;
1702 } 1725 }
1726
1727 if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM)
1728 return -EINVAL;
1729
1703 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; 1730 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
1704 1731
1705 spin_lock_irqsave(&serial->serial_lock, flags); 1732 spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1838,7 +1865,7 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
1838 result = usb_submit_urb(ctrl_urb, GFP_ATOMIC); 1865 result = usb_submit_urb(ctrl_urb, GFP_ATOMIC);
1839 if (result) { 1866 if (result) {
1840 dev_err(&ctrl_urb->dev->dev, 1867 dev_err(&ctrl_urb->dev->dev,
1841 "%s failed submit ctrl_urb %d type %d", __func__, 1868 "%s failed submit ctrl_urb %d type %d\n", __func__,
1842 result, type); 1869 result, type);
1843 return result; 1870 return result;
1844 } 1871 }
@@ -1888,7 +1915,7 @@ static void intr_callback(struct urb *urb)
1888 1915
1889 /* status check */ 1916 /* status check */
1890 if (status) { 1917 if (status) {
1891 log_usb_status(status, __func__); 1918 handle_usb_error(status, __func__, NULL);
1892 return; 1919 return;
1893 } 1920 }
1894 D4("\n--- Got intr callback 0x%02X ---", status); 1921 D4("\n--- Got intr callback 0x%02X ---", status);
@@ -1905,18 +1932,18 @@ static void intr_callback(struct urb *urb)
1905 if (serial != NULL) { 1932 if (serial != NULL) {
1906 D1("Pending read interrupt on port %d\n", i); 1933 D1("Pending read interrupt on port %d\n", i);
1907 spin_lock(&serial->serial_lock); 1934 spin_lock(&serial->serial_lock);
1908 if (serial->rx_state == RX_IDLE) { 1935 if (serial->rx_state == RX_IDLE &&
1936 serial->open_count > 0) {
1909 /* Setup and send a ctrl req read on 1937 /* Setup and send a ctrl req read on
1910 * port i */ 1938 * port i */
1911 if (!serial->rx_urb_filled[0]) { 1939 if (!serial->rx_urb_filled[0]) {
1912 serial->rx_state = RX_SENT; 1940 serial->rx_state = RX_SENT;
1913 hso_mux_serial_read(serial); 1941 hso_mux_serial_read(serial);
1914 } else 1942 } else
1915 serial->rx_state = RX_PENDING; 1943 serial->rx_state = RX_PENDING;
1916
1917 } else { 1944 } else {
1918 D1("Already pending a read on " 1945 D1("Already a read pending on "
1919 "port %d\n", i); 1946 "port %d or port not open\n", i);
1920 } 1947 }
1921 spin_unlock(&serial->serial_lock); 1948 spin_unlock(&serial->serial_lock);
1922 } 1949 }
@@ -1958,7 +1985,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
1958 tty = tty_kref_get(serial->tty); 1985 tty = tty_kref_get(serial->tty);
1959 spin_unlock(&serial->serial_lock); 1986 spin_unlock(&serial->serial_lock);
1960 if (status) { 1987 if (status) {
1961 log_usb_status(status, __func__); 1988 handle_usb_error(status, __func__, serial->parent);
1962 tty_kref_put(tty); 1989 tty_kref_put(tty);
1963 return; 1990 return;
1964 } 1991 }
@@ -2014,7 +2041,7 @@ static void ctrl_callback(struct urb *urb)
2014 tty = tty_kref_get(serial->tty); 2041 tty = tty_kref_get(serial->tty);
2015 spin_unlock(&serial->serial_lock); 2042 spin_unlock(&serial->serial_lock);
2016 if (status) { 2043 if (status) {
2017 log_usb_status(status, __func__); 2044 handle_usb_error(status, __func__, serial->parent);
2018 tty_kref_put(tty); 2045 tty_kref_put(tty);
2019 return; 2046 return;
2020 } 2047 }
@@ -2358,12 +2385,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
2358 serial->tx_data_length = tx_size; 2385 serial->tx_data_length = tx_size;
2359 serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL); 2386 serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
2360 if (!serial->tx_data) { 2387 if (!serial->tx_data) {
2361 dev_err(dev, "%s - Out of memory", __func__); 2388 dev_err(dev, "%s - Out of memory\n", __func__);
2362 goto exit; 2389 goto exit;
2363 } 2390 }
2364 serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); 2391 serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
2365 if (!serial->tx_buffer) { 2392 if (!serial->tx_buffer) {
2366 dev_err(dev, "%s - Out of memory", __func__); 2393 dev_err(dev, "%s - Out of memory\n", __func__);
2367 goto exit; 2394 goto exit;
2368 } 2395 }
2369 2396
@@ -2391,6 +2418,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
2391 2418
2392 INIT_WORK(&hso_dev->async_get_intf, async_get_intf); 2419 INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
2393 INIT_WORK(&hso_dev->async_put_intf, async_put_intf); 2420 INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
2421 INIT_WORK(&hso_dev->reset_device, reset_device);
2394 2422
2395 return hso_dev; 2423 return hso_dev;
2396} 2424}
@@ -2831,13 +2859,14 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
2831 2859
2832 mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); 2860 mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL);
2833 if (!mux->shared_intr_urb) { 2861 if (!mux->shared_intr_urb) {
2834 dev_err(&interface->dev, "Could not allocate intr urb?"); 2862 dev_err(&interface->dev, "Could not allocate intr urb?\n");
2835 goto exit; 2863 goto exit;
2836 } 2864 }
2837 mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize, 2865 mux->shared_intr_buf =
2838 GFP_KERNEL); 2866 kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
2867 GFP_KERNEL);
2839 if (!mux->shared_intr_buf) { 2868 if (!mux->shared_intr_buf) {
2840 dev_err(&interface->dev, "Could not allocate intr buf?"); 2869 dev_err(&interface->dev, "Could not allocate intr buf?\n");
2841 goto exit; 2870 goto exit;
2842 } 2871 }
2843 2872
@@ -3132,6 +3161,26 @@ out:
3132 return result; 3161 return result;
3133} 3162}
3134 3163
3164static void reset_device(struct work_struct *data)
3165{
3166 struct hso_device *hso_dev =
3167 container_of(data, struct hso_device, reset_device);
3168 struct usb_device *usb = hso_dev->usb;
3169 int result;
3170
3171 if (hso_dev->usb_gone) {
3172 D1("No reset during disconnect\n");
3173 } else {
3174 result = usb_lock_device_for_reset(usb, hso_dev->interface);
3175 if (result < 0)
3176 D1("unable to lock device for reset: %d\n", result);
3177 else {
3178 usb_reset_device(usb);
3179 usb_unlock_device(usb);
3180 }
3181 }
3182}
3183
3135static void hso_serial_ref_free(struct kref *ref) 3184static void hso_serial_ref_free(struct kref *ref)
3136{ 3185{
3137 struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); 3186 struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3232,13 +3281,13 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
3232 usb_rcvintpipe(usb, 3281 usb_rcvintpipe(usb,
3233 shared_int->intr_endp->bEndpointAddress & 0x7F), 3282 shared_int->intr_endp->bEndpointAddress & 0x7F),
3234 shared_int->shared_intr_buf, 3283 shared_int->shared_intr_buf,
3235 shared_int->intr_endp->wMaxPacketSize, 3284 1,
3236 intr_callback, shared_int, 3285 intr_callback, shared_int,
3237 shared_int->intr_endp->bInterval); 3286 shared_int->intr_endp->bInterval);
3238 3287
3239 result = usb_submit_urb(shared_int->shared_intr_urb, gfp); 3288 result = usb_submit_urb(shared_int->shared_intr_urb, gfp);
3240 if (result) 3289 if (result)
3241 dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__, 3290 dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__,
3242 result); 3291 result);
3243 3292
3244 return result; 3293 return result;
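Several hunks above wrap wMaxPacketSize in le16_to_cpu(). USB descriptor fields are stored little-endian, so the raw value is byte-swapped on big-endian hosts and the CRC-padding residue (and the interrupt buffer size) come out wrong there. A minimal sketch of the corrected residue computation; the helper name is illustrative only:

#include <linux/usb.h>

/* Illustrative helper: bytes of the transfer past the last full
 * wMaxPacketSize-sized packet.  le16_to_cpu() makes the result correct on
 * both little- and big-endian hosts. */
static u32 endp_residue(const struct urb *urb,
			const struct usb_endpoint_descriptor *ep)
{
	u16 maxp = le16_to_cpu(ep->wMaxPacketSize);

	return urb->actual_length % maxp;
}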
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index f14d225404da..fd19db0d2504 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -270,7 +270,7 @@ static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
270 get_registers(dev, PHYCNT, 1, data); 270 get_registers(dev, PHYCNT, 1, data);
271 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); 271 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
272 272
273 if (i < MII_TIMEOUT) { 273 if (i <= MII_TIMEOUT) {
274 get_registers(dev, PHYDAT, 2, data); 274 get_registers(dev, PHYDAT, 2, data);
275 *reg = data[0] | (data[1] << 8); 275 *reg = data[0] | (data[1] << 8);
276 return 0; 276 return 0;
@@ -295,7 +295,7 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
295 get_registers(dev, PHYCNT, 1, data); 295 get_registers(dev, PHYCNT, 1, data);
296 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); 296 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
297 297
298 if (i < MII_TIMEOUT) 298 if (i <= MII_TIMEOUT)
299 return 0; 299 return 0;
300 else 300 else
301 return 1; 301 return 1;
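The change from < to <= fixes an off-by-one in the MII poll loops: i++ is only evaluated while PHY_GO is still set, so a transaction that completes on the final allowed poll exits with i == MII_TIMEOUT, whereas a genuine timeout exits with i == MII_TIMEOUT + 1. A standalone sketch of the same loop shape (the constant value is assumed for illustration):

#define MII_TIMEOUT	10	/* assumed poll budget, for illustration */

/* read_busy() stands in for reading PHYCNT and testing PHY_GO. */
static int poll_until_done(int (*read_busy)(void))
{
	int i = 0;
	int busy;

	do {
		busy = read_busy();
	} while (busy && (i++ < MII_TIMEOUT));

	/* i == MII_TIMEOUT can still mean success (completed on the last
	 * poll); only i == MII_TIMEOUT + 1 means the hardware never finished. */
	if (i <= MII_TIMEOUT)
		return 0;	/* completed */
	return 1;		/* timed out, as in the driver */
}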
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 593e01f64e9b..611b80435955 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32;
102#include <linux/ethtool.h> 102#include <linux/ethtool.h>
103#include <linux/crc32.h> 103#include <linux/crc32.h>
104#include <linux/bitops.h> 104#include <linux/bitops.h>
105#include <linux/workqueue.h>
105#include <asm/processor.h> /* Processor type for cache alignment. */ 106#include <asm/processor.h> /* Processor type for cache alignment. */
106#include <asm/io.h> 107#include <asm/io.h>
107#include <asm/irq.h> 108#include <asm/irq.h>
@@ -389,6 +390,7 @@ struct rhine_private {
389 struct net_device *dev; 390 struct net_device *dev;
390 struct napi_struct napi; 391 struct napi_struct napi;
391 spinlock_t lock; 392 spinlock_t lock;
393 struct work_struct reset_task;
392 394
393 /* Frequently used values: keep some adjacent for cache effect. */ 395 /* Frequently used values: keep some adjacent for cache effect. */
394 u32 quirks; 396 u32 quirks;
@@ -407,6 +409,7 @@ struct rhine_private {
407static int mdio_read(struct net_device *dev, int phy_id, int location); 409static int mdio_read(struct net_device *dev, int phy_id, int location);
408static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 410static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
409static int rhine_open(struct net_device *dev); 411static int rhine_open(struct net_device *dev);
412static void rhine_reset_task(struct work_struct *work);
410static void rhine_tx_timeout(struct net_device *dev); 413static void rhine_tx_timeout(struct net_device *dev);
411static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 414static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
412 struct net_device *dev); 415 struct net_device *dev);
@@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
775 dev->irq = pdev->irq; 778 dev->irq = pdev->irq;
776 779
777 spin_lock_init(&rp->lock); 780 spin_lock_init(&rp->lock);
781 INIT_WORK(&rp->reset_task, rhine_reset_task);
782
778 rp->mii_if.dev = dev; 783 rp->mii_if.dev = dev;
779 rp->mii_if.mdio_read = mdio_read; 784 rp->mii_if.mdio_read = mdio_read;
780 rp->mii_if.mdio_write = mdio_write; 785 rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev)
1179 return 0; 1184 return 0;
1180} 1185}
1181 1186
1182static void rhine_tx_timeout(struct net_device *dev) 1187static void rhine_reset_task(struct work_struct *work)
1183{ 1188{
1184 struct rhine_private *rp = netdev_priv(dev); 1189 struct rhine_private *rp = container_of(work, struct rhine_private,
1185 void __iomem *ioaddr = rp->base; 1190 reset_task);
1186 1191 struct net_device *dev = rp->dev;
1187 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1188 "%4.4x, resetting...\n",
1189 dev->name, ioread16(ioaddr + IntrStatus),
1190 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1191 1192
1192 /* protect against concurrent rx interrupts */ 1193 /* protect against concurrent rx interrupts */
1193 disable_irq(rp->pdev->irq); 1194 disable_irq(rp->pdev->irq);
1194 1195
1195 napi_disable(&rp->napi); 1196 napi_disable(&rp->napi);
1196 1197
1197 spin_lock(&rp->lock); 1198 spin_lock_bh(&rp->lock);
1198 1199
1199 /* clear all descriptors */ 1200 /* clear all descriptors */
1200 free_tbufs(dev); 1201 free_tbufs(dev);
@@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev)
1206 rhine_chip_reset(dev); 1207 rhine_chip_reset(dev);
1207 init_registers(dev); 1208 init_registers(dev);
1208 1209
1209 spin_unlock(&rp->lock); 1210 spin_unlock_bh(&rp->lock);
1210 enable_irq(rp->pdev->irq); 1211 enable_irq(rp->pdev->irq);
1211 1212
1212 dev->trans_start = jiffies; 1213 dev->trans_start = jiffies;
@@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev)
1214 netif_wake_queue(dev); 1215 netif_wake_queue(dev);
1215} 1216}
1216 1217
1218static void rhine_tx_timeout(struct net_device *dev)
1219{
1220 struct rhine_private *rp = netdev_priv(dev);
1221 void __iomem *ioaddr = rp->base;
1222
1223 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1224 "%4.4x, resetting...\n",
1225 dev->name, ioread16(ioaddr + IntrStatus),
1226 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1227
1228 schedule_work(&rp->reset_task);
1229}
1230
1217static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 1231static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1218 struct net_device *dev) 1232 struct net_device *dev)
1219{ 1233{
@@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev)
1830 struct rhine_private *rp = netdev_priv(dev); 1844 struct rhine_private *rp = netdev_priv(dev);
1831 void __iomem *ioaddr = rp->base; 1845 void __iomem *ioaddr = rp->base;
1832 1846
1833 spin_lock_irq(&rp->lock);
1834
1835 netif_stop_queue(dev);
1836 napi_disable(&rp->napi); 1847 napi_disable(&rp->napi);
1848 cancel_work_sync(&rp->reset_task);
1849 netif_stop_queue(dev);
1850
1851 spin_lock_irq(&rp->lock);
1837 1852
1838 if (debug > 1) 1853 if (debug > 1)
1839 printk(KERN_DEBUG "%s: Shutting down ethercard, " 1854 printk(KERN_DEBUG "%s: Shutting down ethercard, "
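rhine_tx_timeout() is invoked from the netdev watchdog timer, i.e. in atomic context, while the recovery it used to perform (disable_irq(), napi_disable(), a full ring re-init) can sleep; the patch therefore moves the heavy work into rhine_reset_task() and leaves only logging plus schedule_work() in the timeout handler, with rhine_close() cancelling the work before teardown. A condensed sketch of the pattern with hypothetical names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *dev;
	struct work_struct reset_task;	/* INIT_WORK()ed at probe time, as above */
};

/* Stand-in for the real recovery: tear down and re-initialise the rings,
 * reset and re-program the chip.  Runs in process context, may sleep. */
static void foo_full_reset(struct net_device *dev)
{
}

static void foo_reset_task(struct work_struct *work)
{
	struct foo_priv *fp = container_of(work, struct foo_priv, reset_task);

	foo_full_reset(fp->dev);
	netif_wake_queue(fp->dev);
}

/* Watchdog callback, atomic context: just defer. */
static void foo_tx_timeout(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	schedule_work(&fp->reset_task);
}

static int foo_close(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	cancel_work_sync(&fp->reset_task);	/* no reset may run after close */
	/* ... normal shutdown ... */
	return 0;
}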
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 4ceb441f2687..317aa34b21cf 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
1877/** 1877/**
1878 * tx_srv - transmit interrupt service 1878 * tx_srv - transmit interrupt service
1879 * @vptr; Velocity 1879 * @vptr; Velocity
1880 * @status:
1881 * 1880 *
1882 * Scan the queues looking for transmitted packets that 1881 * Scan the queues looking for transmitted packets that
1883 * we can complete and clean up. Update any statistics as 1882 * we can complete and clean up. Update any statistics as
1884 * necessary/ 1883 * necessary/
1885 */ 1884 */
1886static int velocity_tx_srv(struct velocity_info *vptr, u32 status) 1885static int velocity_tx_srv(struct velocity_info *vptr)
1887{ 1886{
1888 struct tx_desc *td; 1887 struct tx_desc *td;
1889 int qnum; 1888 int qnum;
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2090/** 2089/**
2091 * velocity_rx_srv - service RX interrupt 2090 * velocity_rx_srv - service RX interrupt
2092 * @vptr: velocity 2091 * @vptr: velocity
2093 * @status: adapter status (unused)
2094 * 2092 *
2095 * Walk the receive ring of the velocity adapter and remove 2093 * Walk the receive ring of the velocity adapter and remove
2096 * any received packets from the receive queue. Hand the ring 2094 * any received packets from the receive queue. Hand the ring
2097 * slots back to the adapter for reuse. 2095 * slots back to the adapter for reuse.
2098 */ 2096 */
2099static int velocity_rx_srv(struct velocity_info *vptr, int status, 2097static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2100 int budget_left)
2101{ 2098{
2102 struct net_device_stats *stats = &vptr->dev->stats; 2099 struct net_device_stats *stats = &vptr->dev->stats;
2103 int rd_curr = vptr->rx.curr; 2100 int rd_curr = vptr->rx.curr;
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
2151 struct velocity_info *vptr = container_of(napi, 2148 struct velocity_info *vptr = container_of(napi,
2152 struct velocity_info, napi); 2149 struct velocity_info, napi);
2153 unsigned int rx_done; 2150 unsigned int rx_done;
2154 u32 isr_status; 2151 unsigned long flags;
2155
2156 spin_lock(&vptr->lock);
2157 isr_status = mac_read_isr(vptr->mac_regs);
2158
2159 /* Ack the interrupt */
2160 mac_write_isr(vptr->mac_regs, isr_status);
2161 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2162 velocity_error(vptr, isr_status);
2163 2152
2153 spin_lock_irqsave(&vptr->lock, flags);
2164 /* 2154 /*
2165 * Do rx and tx twice for performance (taken from the VIA 2155 * Do rx and tx twice for performance (taken from the VIA
2166 * out-of-tree driver). 2156 * out-of-tree driver).
2167 */ 2157 */
2168 rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); 2158 rx_done = velocity_rx_srv(vptr, budget / 2);
2169 velocity_tx_srv(vptr, isr_status); 2159 velocity_tx_srv(vptr);
2170 rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); 2160 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2171 velocity_tx_srv(vptr, isr_status); 2161 velocity_tx_srv(vptr);
2172
2173 spin_unlock(&vptr->lock);
2174 2162
2175 /* If budget not fully consumed, exit the polling mode */ 2163 /* If budget not fully consumed, exit the polling mode */
2176 if (rx_done < budget) { 2164 if (rx_done < budget) {
2177 napi_complete(napi); 2165 napi_complete(napi);
2178 mac_enable_int(vptr->mac_regs); 2166 mac_enable_int(vptr->mac_regs);
2179 } 2167 }
2168 spin_unlock_irqrestore(&vptr->lock, flags);
2180 2169
2181 return rx_done; 2170 return rx_done;
2182} 2171}
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
2206 return IRQ_NONE; 2195 return IRQ_NONE;
2207 } 2196 }
2208 2197
2198 /* Ack the interrupt */
2199 mac_write_isr(vptr->mac_regs, isr_status);
2200
2209 if (likely(napi_schedule_prep(&vptr->napi))) { 2201 if (likely(napi_schedule_prep(&vptr->napi))) {
2210 mac_disable_int(vptr->mac_regs); 2202 mac_disable_int(vptr->mac_regs);
2211 __napi_schedule(&vptr->napi); 2203 __napi_schedule(&vptr->napi);
2212 } 2204 }
2205
2206 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2207 velocity_error(vptr, isr_status);
2208
2213 spin_unlock(&vptr->lock); 2209 spin_unlock(&vptr->lock);
2214 2210
2215 return IRQ_HANDLED; 2211 return IRQ_HANDLED;
@@ -2237,8 +2233,6 @@ static int velocity_open(struct net_device *dev)
2237 /* Ensure chip is running */ 2233 /* Ensure chip is running */
2238 pci_set_power_state(vptr->pdev, PCI_D0); 2234 pci_set_power_state(vptr->pdev, PCI_D0);
2239 2235
2240 velocity_give_many_rx_descs(vptr);
2241
2242 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2236 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2243 2237
2244 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, 2238 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
@@ -2250,6 +2244,8 @@ static int velocity_open(struct net_device *dev)
2250 goto out; 2244 goto out;
2251 } 2245 }
2252 2246
2247 velocity_give_many_rx_descs(vptr);
2248
2253 mac_enable_int(vptr->mac_regs); 2249 mac_enable_int(vptr->mac_regs);
2254 netif_start_queue(dev); 2250 netif_start_queue(dev);
2255 napi_enable(&vptr->napi); 2251 napi_enable(&vptr->napi);
@@ -2339,10 +2335,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2339 2335
2340 dev->mtu = new_mtu; 2336 dev->mtu = new_mtu;
2341 2337
2342 velocity_give_many_rx_descs(vptr);
2343
2344 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2338 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2345 2339
2340 velocity_give_many_rx_descs(vptr);
2341
2346 mac_enable_int(vptr->mac_regs); 2342 mac_enable_int(vptr->mac_regs);
2347 netif_start_queue(dev); 2343 netif_start_queue(dev);
2348 2344
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
3100 velocity_init_registers(vptr, VELOCITY_INIT_WOL); 3096 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3101 mac_disable_int(vptr->mac_regs); 3097 mac_disable_int(vptr->mac_regs);
3102 3098
3103 velocity_tx_srv(vptr, 0); 3099 velocity_tx_srv(vptr);
3104 3100
3105 for (i = 0; i < vptr->tx.numq; i++) { 3101 for (i = 0; i < vptr->tx.numq; i++) {
3106 if (vptr->tx.used[i]) 3102 if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3344{ 3340{
3345 struct velocity_info *vptr = netdev_priv(dev); 3341 struct velocity_info *vptr = netdev_priv(dev);
3346 int max_us = 0x3f * 64; 3342 int max_us = 0x3f * 64;
3343 unsigned long flags;
3347 3344
3348 /* 6 bits of */ 3345 /* 6 bits of */
3349 if (ecmd->tx_coalesce_usecs > max_us) 3346 if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3365 ecmd->tx_coalesce_usecs); 3362 ecmd->tx_coalesce_usecs);
3366 3363
3367 /* Setup the interrupt suppression and queue timers */ 3364 /* Setup the interrupt suppression and queue timers */
3365 spin_lock_irqsave(&vptr->lock, flags);
3368 mac_disable_int(vptr->mac_regs); 3366 mac_disable_int(vptr->mac_regs);
3369 setup_adaptive_interrupts(vptr); 3367 setup_adaptive_interrupts(vptr);
3370 setup_queue_timers(vptr); 3368 setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3372 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); 3370 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3373 mac_clear_isr(vptr->mac_regs); 3371 mac_clear_isr(vptr->mac_regs);
3374 mac_enable_int(vptr->mac_regs); 3372 mac_enable_int(vptr->mac_regs);
3373 spin_unlock_irqrestore(&vptr->lock, flags);
3375 3374
3376 return 0; 3375 return 0;
3377} 3376}
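In the reworked velocity_poll() the interrupt status is no longer read or acknowledged; that now happens in velocity_intr() before NAPI is scheduled. The poll routine still shares vptr->lock with the hard interrupt handler (and now with velocity_set_coalesce()), so it takes the lock with spin_lock_irqsave(); otherwise an interrupt arriving on the same CPU while the softirq holds the lock could spin on it indefinitely. A generic sketch of that poll shape, with hypothetical names and placeholder servicing calls:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
	struct napi_struct napi;
	spinlock_t lock;		/* shared with the hard IRQ handler */
};

static int foo_rx_tx_service(struct foo_priv *fp, int budget)
{
	/* placeholder for the RX/TX ring servicing */
	return 0;
}

static void foo_enable_device_irqs(struct foo_priv *fp)
{
	/* placeholder for re-arming the NIC's interrupt mask */
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	unsigned long flags;
	int done;

	spin_lock_irqsave(&fp->lock, flags);
	done = foo_rx_tx_service(fp, budget);

	if (done < budget) {
		napi_complete(napi);
		foo_enable_device_irqs(fp);
	}
	spin_unlock_irqrestore(&fp->lock, flags);

	return done;
}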
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c708ecc3cb2e..9ead30bd00c4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -395,8 +395,7 @@ static void refill_work(struct work_struct *work)
395 395
396 vi = container_of(work, struct virtnet_info, refill.work); 396 vi = container_of(work, struct virtnet_info, refill.work);
397 napi_disable(&vi->napi); 397 napi_disable(&vi->napi);
398 try_fill_recv(vi, GFP_KERNEL); 398 still_empty = !try_fill_recv(vi, GFP_KERNEL);
399 still_empty = (vi->num == 0);
400 napi_enable(&vi->napi); 399 napi_enable(&vi->napi);
401 400
402 /* In theory, this can happen: if we don't get any buffers in 401 /* In theory, this can happen: if we don't get any buffers in
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index f1c4b2a1e867..b9685e82f7b6 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -310,7 +310,7 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
310 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, 310 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
311 rx_priv->data_size, PCI_DMA_FROMDEVICE); 311 rx_priv->data_size, PCI_DMA_FROMDEVICE);
312 312
313 if (dma_addr == 0) { 313 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
314 ring->stats.pci_map_fail++; 314 ring->stats.pci_map_fail++;
315 return -EIO; 315 return -EIO;
316 } 316 }
@@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4087 goto _exit0; 4087 goto _exit0;
4088 } 4088 }
4089 4089
4090 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { 4090 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4091 vxge_debug_ll_config(VXGE_TRACE, 4091 vxge_debug_ll_config(VXGE_TRACE,
4092 "%s : using 64bit DMA", __func__); 4092 "%s : using 64bit DMA", __func__);
4093 4093
4094 high_dma = 1; 4094 high_dma = 1;
4095 4095
4096 if (pci_set_consistent_dma_mask(pdev, 4096 if (pci_set_consistent_dma_mask(pdev,
4097 0xffffffffffffffffULL)) { 4097 DMA_BIT_MASK(64))) {
4098 vxge_debug_init(VXGE_ERR, 4098 vxge_debug_init(VXGE_ERR,
4099 "%s : unable to obtain 64bit DMA for " 4099 "%s : unable to obtain 64bit DMA for "
4100 "consistent allocations", __func__); 4100 "consistent allocations", __func__);
4101 ret = -ENOMEM; 4101 ret = -ENOMEM;
4102 goto _exit1; 4102 goto _exit1;
4103 } 4103 }
4104 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) { 4104 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4105 vxge_debug_ll_config(VXGE_TRACE, 4105 vxge_debug_ll_config(VXGE_TRACE,
4106 "%s : using 32bit DMA", __func__); 4106 "%s : using 32bit DMA", __func__);
4107 } else { 4107 } else {
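Two small correctness points in the vxge hunks: a streaming DMA mapping may legitimately map to bus address 0 and a failed mapping is not guaranteed to return 0, so the result has to be tested with pci_dma_mapping_error() rather than compared against zero; and DMA_BIT_MASK(64)/DMA_BIT_MASK(32) replace the hand-written mask constants. A minimal sketch of the checked mapping, with a hypothetical helper name:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: map an RX buffer and fail cleanly if the IOMMU or
 * DMA API reports a mapping error. */
static int foo_map_rx_buf(struct pci_dev *pdev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, *dma)))
		return -EIO;	/* do not test against 0, it can be valid */

	return 0;
}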
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 5cc0f279417e..2d7c96d7e865 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -151,6 +151,7 @@ enum {
151 151
152 /* Device IDs */ 152 /* Device IDs */
153 USB_DEVICE_ID_I6050 = 0x0186, 153 USB_DEVICE_ID_I6050 = 0x0186,
154 USB_DEVICE_ID_I6050_2 = 0x0188,
154}; 155};
155 156
156 157
@@ -234,6 +235,7 @@ struct i2400mu {
234 u8 rx_size_auto_shrink; 235 u8 rx_size_auto_shrink;
235 236
236 struct dentry *debugfs_dentry; 237 struct dentry *debugfs_dentry;
238 unsigned i6050:1; /* 1 if this is a 6050 based SKU */
237}; 239};
238 240
239 241
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 3b48681f8a0d..98f4f8c5fb68 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface,
478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; 478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
479 i2400m->bus_bm_mac_addr_impaired = 0; 479 i2400m->bus_bm_mac_addr_impaired = 0;
480 480
481 if (id->idProduct == USB_DEVICE_ID_I6050) { 481 switch (id->idProduct) {
482 case USB_DEVICE_ID_I6050:
483 case USB_DEVICE_ID_I6050_2:
484 i2400mu->i6050 = 1;
485 break;
486 default:
487 break;
488 }
489
490 if (i2400mu->i6050) {
482 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; 491 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
483 i2400mu->endpoint_cfg.bulk_out = 0; 492 i2400mu->endpoint_cfg.bulk_out = 0;
484 i2400mu->endpoint_cfg.notification = 3; 493 i2400mu->endpoint_cfg.notification = 3;
@@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface)
719static 728static
720struct usb_device_id i2400mu_id_table[] = { 729struct usb_device_id i2400mu_id_table[] = {
721 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, 730 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
731 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
722 { USB_DEVICE(0x8086, 0x0181) }, 732 { USB_DEVICE(0x8086, 0x0181) },
723 { USB_DEVICE(0x8086, 0x1403) }, 733 { USB_DEVICE(0x8086, 0x1403) },
724 { USB_DEVICE(0x8086, 0x1405) }, 734 { USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a4c086f069b1..e63b7c40d0ee 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1903,17 +1903,6 @@ accept:
1903 rxs->noise = sc->ah->ah_noise_floor; 1903 rxs->noise = sc->ah->ah_noise_floor;
1904 rxs->signal = rxs->noise + rs.rs_rssi; 1904 rxs->signal = rxs->noise + rs.rs_rssi;
1905 1905
1906 /* An rssi of 35 indicates you should be able use
1907 * 54 Mbps reliably. A more elaborate scheme can be used
1908 * here but it requires a map of SNR/throughput for each
1909 * possible mode used */
1910 rxs->qual = rs.rs_rssi * 100 / 35;
1911
1912 /* rssi can be more than 35 though, anything above that
1913 * should be considered at 100% */
1914 if (rxs->qual > 100)
1915 rxs->qual = 100;
1916
1917 rxs->antenna = rs.rs_antenna; 1906 rxs->antenna = rs.rs_antenna;
1918 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 1907 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
1919 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); 1908 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -2381,6 +2370,9 @@ ath5k_init(struct ath5k_softc *sc)
2381 */ 2370 */
2382 ath5k_stop_locked(sc); 2371 ath5k_stop_locked(sc);
2383 2372
2373 /* Set PHY calibration interval */
2374 ah->ah_cal_intval = ath5k_calinterval;
2375
2384 /* 2376 /*
2385 * The basic interface to setting the hardware in a good 2377 * The basic interface to setting the hardware in a good
2386 * state is ``reset''. On return the hardware is known to 2378 * state is ``reset''. On return the hardware is known to
@@ -2408,10 +2400,6 @@ ath5k_init(struct ath5k_softc *sc)
2408 2400
2409 /* Set ack to be sent at low bit-rates */ 2401 /* Set ack to be sent at low bit-rates */
2410 ath5k_hw_set_ack_bitrate_high(ah, false); 2402 ath5k_hw_set_ack_bitrate_high(ah, false);
2411
2412 /* Set PHY calibration inteval */
2413 ah->ah_cal_intval = ath5k_calinterval;
2414
2415 ret = 0; 2403 ret = 0;
2416done: 2404done:
2417 mmiowb(); 2405 mmiowb();
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 5d1c8677f180..6a3f4da7fb48 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -97,7 +97,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
97 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 97 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
98 int ret; 98 int ret;
99 u16 val; 99 u16 val;
100 u32 cksum, offset; 100 u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
101 101
102 /* 102 /*
103 * Read values from EEPROM and store them in the capability structure 103 * Read values from EEPROM and store them in the capability structure
@@ -116,12 +116,38 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
116 * Validate the checksum of the EEPROM date. There are some 116 * Validate the checksum of the EEPROM date. There are some
117 * devices with invalid EEPROMs. 117 * devices with invalid EEPROMs.
118 */ 118 */
119 for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) { 119 AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
120 if (val) {
121 eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
122 AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
123 AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
124 eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
125
126 /*
127 * Fail safe check to prevent stupid loops due
128 * to busted EEPROMs. XXX: This value is likely too
129 * big still, waiting on a better value.
130 */
131 if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
132 ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
133 "%d (0x%04x) max expected: %d (0x%04x)\n",
134 eep_max, eep_max,
135 3 * AR5K_EEPROM_INFO_MAX,
136 3 * AR5K_EEPROM_INFO_MAX);
137 return -EIO;
138 }
139 }
140
141 for (cksum = 0, offset = 0; offset < eep_max; offset++) {
120 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val); 142 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
121 cksum ^= val; 143 cksum ^= val;
122 } 144 }
123 if (cksum != AR5K_EEPROM_INFO_CKSUM) { 145 if (cksum != AR5K_EEPROM_INFO_CKSUM) {
124 ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum); 146 ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
147 "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
148 cksum, eep_max,
149 eep_max == AR5K_EEPROM_INFO_MAX ?
150 "default size" : "custom size");
125 return -EIO; 151 return -EIO;
126 } 152 }
127 153
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 0123f3521a0b..473a483bb9c3 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -37,6 +37,14 @@
37#define AR5K_EEPROM_RFKILL_POLARITY_S 1 37#define AR5K_EEPROM_RFKILL_POLARITY_S 1
38 38
39#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */ 39#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
40
41/* FLASH(EEPROM) Defines for AR531X chips */
42#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
43#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
44#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
45#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
46#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
47
40#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */ 48#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
41#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */ 49#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
42#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE) 50#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 03a1106ad725..5774cea23a3b 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -25,7 +25,7 @@ config ATH9K
25 25
26config ATH9K_DEBUGFS 26config ATH9K_DEBUGFS
27 bool "Atheros ath9k debugging" 27 bool "Atheros ath9k debugging"
28 depends on ATH9K 28 depends on ATH9K && DEBUG_FS
29 ---help--- 29 ---help---
30 Say Y, if you need access to ath9k's statistics for 30 Say Y, if you need access to ath9k's statistics for
31 interrupts, rate control, etc. 31 interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index e2cef2ff5d8f..1597a42731ed 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -33,11 +33,11 @@ struct ath_node;
33 33
34/* Macro to expand scalars to 64-bit objects */ 34/* Macro to expand scalars to 64-bit objects */
35 35
36#define ito64(x) (sizeof(x) == 8) ? \ 36#define ito64(x) (sizeof(x) == 1) ? \
37 (((unsigned long long int)(x)) & (0xff)) : \ 37 (((unsigned long long int)(x)) & (0xff)) : \
38 (sizeof(x) == 16) ? \ 38 (sizeof(x) == 2) ? \
39 (((unsigned long long int)(x)) & 0xffff) : \ 39 (((unsigned long long int)(x)) & 0xffff) : \
40 ((sizeof(x) == 32) ? \ 40 ((sizeof(x) == 4) ? \
41 (((unsigned long long int)(x)) & 0xffffffff) : \ 41 (((unsigned long long int)(x)) & 0xffffffff) : \
42 (unsigned long long int)(x)) 42 (unsigned long long int)(x))
43 43
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2ec61f08cfdb..ae371448b5a0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -855,12 +855,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
855 } 855 }
856} 856}
857 857
858static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah) 858static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
859{ 859{
860 u32 i, j; 860 u32 i, j;
861 861
862 if ((ah->hw_version.devid == AR9280_DEVID_PCI) && 862 if (ah->hw_version.devid == AR9280_DEVID_PCI) {
863 test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
864 863
865 /* EEPROM Fixup */ 864 /* EEPROM Fixup */
866 for (i = 0; i < ah->iniModes.ia_rows; i++) { 865 for (i = 0; i < ah->iniModes.ia_rows; i++) {
@@ -980,7 +979,7 @@ int ath9k_hw_init(struct ath_hw *ah)
980 if (r) 979 if (r)
981 return r; 980 return r;
982 981
983 ath9k_hw_init_11a_eeprom_fix(ah); 982 ath9k_hw_init_eeprom_fix(ah);
984 983
985 r = ath9k_hw_init_macaddr(ah); 984 r = ath9k_hw_init_macaddr(ah);
986 if (r) { 985 if (r) {
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 71b84d91dcff..efc420cd42bf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
186 wait = wait_time; 186 wait = wait_time;
187 while (ath9k_hw_numtxpending(ah, q)) { 187 while (ath9k_hw_numtxpending(ah, q)) {
188 if ((--wait) == 0) { 188 if ((--wait) == 0) {
189 ath_print(common, ATH_DBG_QUEUE, 189 ath_print(common, ATH_DBG_FATAL,
190 "Failed to stop TX DMA in 100 " 190 "Failed to stop TX DMA in 100 "
191 "msec after killing last frame\n"); 191 "msec after killing last frame\n");
192 break; 192 break;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0c87771383f0..e185479e295e 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -77,6 +77,9 @@
77#define ATH9K_TXERR_XTXOP 0x08 77#define ATH9K_TXERR_XTXOP 0x08
78#define ATH9K_TXERR_TIMER_EXPIRED 0x10 78#define ATH9K_TXERR_TIMER_EXPIRED 0x10
79#define ATH9K_TX_ACKED 0x20 79#define ATH9K_TX_ACKED 0x20
80#define ATH9K_TXERR_MASK \
81 (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
82 ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
80 83
81#define ATH9K_TX_BA 0x01 84#define ATH9K_TX_BA 0x01
82#define ATH9K_TX_PWRMGMT 0x02 85#define ATH9K_TX_PWRMGMT 0x02
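
ATH9K_TXERR_MASK collects every TX error bit so completion code can test them with a single AND; the xmit.c hunk further below switches the txok computation from checking only ATH9K_TXERR_FILT to using this mask:

    /* the frame only counts as OK when no error bit at all is set */
    txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
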
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c48743452515..643bea35686f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1973,6 +1973,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1973 struct ieee80211_hw *hw = sc->hw; 1973 struct ieee80211_hw *hw = sc->hw;
1974 int r; 1974 int r;
1975 1975
1976 /* Stop ANI */
1977 del_timer_sync(&common->ani.timer);
1978
1976 ath9k_hw_set_interrupts(ah, 0); 1979 ath9k_hw_set_interrupts(ah, 0);
1977 ath_drain_all_txq(sc, retry_tx); 1980 ath_drain_all_txq(sc, retry_tx);
1978 ath_stoprecv(sc); 1981 ath_stoprecv(sc);
@@ -2014,6 +2017,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
2014 } 2017 }
2015 } 2018 }
2016 2019
2020 /* Start ANI */
2021 ath_start_ani(common);
2022
2017 return r; 2023 return r;
2018} 2024}
2019 2025
@@ -2508,6 +2514,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2508 return; /* another wiphy still in use */ 2514 return; /* another wiphy still in use */
2509 } 2515 }
2510 2516
2517 /* Ensure HW is awake when we try to shut it down. */
2518 ath9k_ps_wakeup(sc);
2519
2511 if (ah->btcoex_hw.enabled) { 2520 if (ah->btcoex_hw.enabled) {
2512 ath9k_hw_btcoex_disable(ah); 2521 ath9k_hw_btcoex_disable(ah);
2513 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 2522 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -2528,6 +2537,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2528 /* disable HAL and put h/w to sleep */ 2537 /* disable HAL and put h/w to sleep */
2529 ath9k_hw_disable(ah); 2538 ath9k_hw_disable(ah);
2530 ath9k_hw_configpcipowersave(ah, 1, 1); 2539 ath9k_hw_configpcipowersave(ah, 1, 1);
2540 ath9k_ps_restore(sc);
2541
2542 /* Finally, put the chip in FULL SLEEP mode */
2531 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 2543 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
2532 2544
2533 sc->sc_flags |= SC_OP_INVALID; 2545 sc->sc_flags |= SC_OP_INVALID;
@@ -2641,10 +2653,12 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2641 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 2653 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2642 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 2654 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2643 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { 2655 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2656 ath9k_ps_wakeup(sc);
2644 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2657 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2645 ath_beacon_return(sc, avp); 2658 ath9k_ps_restore(sc);
2646 } 2659 }
2647 2660
2661 ath_beacon_return(sc, avp);
2648 sc->sc_flags &= ~SC_OP_BEACONS; 2662 sc->sc_flags &= ~SC_OP_BEACONS;
2649 2663
2650 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 2664 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
@@ -3091,15 +3105,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
3091 case IEEE80211_AMPDU_RX_STOP: 3105 case IEEE80211_AMPDU_RX_STOP:
3092 break; 3106 break;
3093 case IEEE80211_AMPDU_TX_START: 3107 case IEEE80211_AMPDU_TX_START:
3108 ath9k_ps_wakeup(sc);
3094 ath_tx_aggr_start(sc, sta, tid, ssn); 3109 ath_tx_aggr_start(sc, sta, tid, ssn);
3095 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3110 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3111 ath9k_ps_restore(sc);
3096 break; 3112 break;
3097 case IEEE80211_AMPDU_TX_STOP: 3113 case IEEE80211_AMPDU_TX_STOP:
3114 ath9k_ps_wakeup(sc);
3098 ath_tx_aggr_stop(sc, sta, tid); 3115 ath_tx_aggr_stop(sc, sta, tid);
3099 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3116 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3117 ath9k_ps_restore(sc);
3100 break; 3118 break;
3101 case IEEE80211_AMPDU_TX_OPERATIONAL: 3119 case IEEE80211_AMPDU_TX_OPERATIONAL:
3120 ath9k_ps_wakeup(sc);
3102 ath_tx_aggr_resume(sc, sta, tid); 3121 ath_tx_aggr_resume(sc, sta, tid);
3122 ath9k_ps_restore(sc);
3103 break; 3123 break;
3104 default: 3124 default:
3105 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 3125 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
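
The main.c changes above all follow the same pattern: hardware access (stopping TX DMA, starting or stopping aggregation, shutting the chip down) is now bracketed by a power-save reference so registers are never touched while the chip is asleep. The shape of the bracket, in isolation (sketch of the existing calls, not a new API):

    ath9k_ps_wakeup(sc);        /* take a power-save reference; chip is awake */
    /* ... ath9k_hw_*() register access goes here ... */
    ath9k_ps_restore(sc);       /* drop the reference; chip may sleep again */
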
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 5321f735e5a0..f7af5ea54753 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -96,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
96 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm); 96 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
97} 97}
98 98
99const static struct ath_bus_ops ath_pci_bus_ops = { 99static const struct ath_bus_ops ath_pci_bus_ops = {
100 .read_cachesize = ath_pci_read_cachesize, 100 .read_cachesize = ath_pci_read_cachesize,
101 .cleanup = ath_pci_cleanup, 101 .cleanup = ath_pci_cleanup,
102 .eeprom_read = ath_pci_eeprom_read, 102 .eeprom_read = ath_pci_eeprom_read,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 2a11cc57ceea..29bf33692f71 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1108 if (npend) { 1108 if (npend) {
1109 int r; 1109 int r;
1110 1110
1111 ath_print(common, ATH_DBG_XMIT, 1111 ath_print(common, ATH_DBG_FATAL,
1112 "Unable to stop TxDMA. Reset HAL!\n"); 1112 "Unable to stop TxDMA. Reset HAL!\n");
1113 1113
1114 spin_lock_bh(&sc->sc_resetlock); 1114 spin_lock_bh(&sc->sc_resetlock);
1115 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true); 1115 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1116 if (r) 1116 if (r)
1117 ath_print(common, ATH_DBG_FATAL, 1117 ath_print(common, ATH_DBG_FATAL,
1118 "Unable to reset hardware; reset status %d\n", 1118 "Unable to reset hardware; reset status %d\n",
@@ -1414,17 +1414,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
1414 * For HT capable stations, we save tidno for later use. 1414 * For HT capable stations, we save tidno for later use.
1415 * We also override seqno set by upper layer with the one 1415 * We also override seqno set by upper layer with the one
1416 * in tx aggregation state. 1416 * in tx aggregation state.
1417 *
1418 * If fragmentation is on, the sequence number is
1419 * not overridden, since it has been
1420 * incremented by the fragmentation routine.
1421 *
1422 * FIXME: check if the fragmentation threshold exceeds
1423 * IEEE80211 max.
1424 */ 1417 */
1425 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1418 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1426 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << 1419 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1427 IEEE80211_SEQ_SEQ_SHIFT);
1428 bf->bf_seqno = tid->seq_next; 1420 bf->bf_seqno = tid->seq_next;
1429 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1421 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1430} 1422}
@@ -1623,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1623 bf->bf_frmlen -= padsize; 1615 bf->bf_frmlen -= padsize;
1624 } 1616 }
1625 1617
1626 if (conf_is_ht(&hw->conf) && !is_pae(skb)) 1618 if (conf_is_ht(&hw->conf))
1627 bf->bf_state.bf_type |= BUF_HT; 1619 bf->bf_state.bf_type |= BUF_HT;
1628 1620
1629 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1636,7 +1628,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1636 bf->bf_keyix = ATH9K_TXKEYIX_INVALID; 1628 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1637 } 1629 }
1638 1630
1639 if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR)) 1631 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1632 (sc->sc_flags & SC_OP_TXAGGR))
1640 assign_aggr_tid_seqno(skb, bf); 1633 assign_aggr_tid_seqno(skb, bf);
1641 1634
1642 bf->bf_mpdu = skb; 1635 bf->bf_mpdu = skb;
@@ -1708,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1708 goto tx_done; 1701 goto tx_done;
1709 } 1702 }
1710 1703
1711 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1704 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1712 /* 1705 /*
1713 * Try aggregation if it's a unicast data frame 1706 * Try aggregation if it's a unicast data frame
1714 * and the destination is HT capable. 1707 * and the destination is HT capable.
@@ -1780,7 +1773,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1780 struct ath_wiphy *aphy = hw->priv; 1773 struct ath_wiphy *aphy = hw->priv;
1781 struct ath_softc *sc = aphy->sc; 1774 struct ath_softc *sc = aphy->sc;
1782 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1775 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1783 int hdrlen, padsize; 1776 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1777 int padpos, padsize;
1784 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1778 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1785 struct ath_tx_control txctl; 1779 struct ath_tx_control txctl;
1786 1780
@@ -1792,7 +1786,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1792 * BSSes. 1786 * BSSes.
1793 */ 1787 */
1794 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1788 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1795 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1796 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 1789 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1797 sc->tx.seq_no += 0x10; 1790 sc->tx.seq_no += 0x10;
1798 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1791 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@@ -1800,9 +1793,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1800 } 1793 }
1801 1794
1802 /* Add the padding after the header if this is not already done */ 1795 /* Add the padding after the header if this is not already done */
1803 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1796 padpos = ath9k_cmn_padpos(hdr->frame_control);
1804 if (hdrlen & 3) { 1797 padsize = padpos & 3;
1805 padsize = hdrlen % 4; 1798 if (padsize && skb->len>padpos) {
1806 if (skb_headroom(skb) < padsize) { 1799 if (skb_headroom(skb) < padsize) {
1807 ath_print(common, ATH_DBG_XMIT, 1800 ath_print(common, ATH_DBG_XMIT,
1808 "TX CABQ padding failed\n"); 1801 "TX CABQ padding failed\n");
@@ -1810,7 +1803,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1810 return; 1803 return;
1811 } 1804 }
1812 skb_push(skb, padsize); 1805 skb_push(skb, padsize);
1813 memmove(skb->data, skb->data + padsize, hdrlen); 1806 memmove(skb->data, skb->data + padsize, padpos);
1814 } 1807 }
1815 1808
1816 txctl.txq = sc->beacon.cabq; 1809 txctl.txq = sc->beacon.cabq;
@@ -1838,7 +1831,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1838 struct ieee80211_hw *hw = sc->hw; 1831 struct ieee80211_hw *hw = sc->hw;
1839 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1832 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1840 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1833 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1841 int hdrlen, padsize; 1834 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1835 int padpos, padsize;
1842 1836
1843 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1837 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1844 1838
@@ -1853,14 +1847,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1853 tx_info->flags |= IEEE80211_TX_STAT_ACK; 1847 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1854 } 1848 }
1855 1849
1856 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1850 padpos = ath9k_cmn_padpos(hdr->frame_control);
1857 padsize = hdrlen & 3; 1851 padsize = padpos & 3;
1858 if (padsize && hdrlen >= 24) { 1852 if (padsize && skb->len>padpos+padsize) {
1859 /* 1853 /*
1860 * Remove MAC header padding before giving the frame back to 1854 * Remove MAC header padding before giving the frame back to
1861 * mac80211. 1855 * mac80211.
1862 */ 1856 */
1863 memmove(skb->data + padsize, skb->data, hdrlen); 1857 memmove(skb->data + padsize, skb->data, padpos);
1864 skb_pull(skb, padsize); 1858 skb_pull(skb, padsize);
1865 } 1859 }
1866 1860
@@ -2078,7 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2078 &txq->axq_q, lastbf->list.prev); 2072 &txq->axq_q, lastbf->list.prev);
2079 2073
2080 txq->axq_depth--; 2074 txq->axq_depth--;
2081 txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT); 2075 txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
2082 txq->axq_tx_inprogress = false; 2076 txq->axq_tx_inprogress = false;
2083 spin_unlock_bh(&txq->axq_lock); 2077 spin_unlock_bh(&txq->axq_lock);
2084 2078
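
ath9k_cmn_padpos() returns the offset at which pad bytes were inserted after the 802.11 header, and padsize = padpos & 3 recovers how many were added. Worked through for a QoS data frame, assuming padpos comes back as 26: padsize is 2, so on completion the first 26 header bytes are shifted forward by 2 and the 2 pad bytes are pulled off before the frame goes back to mac80211 (same statements as in the hunk above, shown out of context):

    padpos  = ath9k_cmn_padpos(hdr->frame_control);    /* e.g. 26 */
    padsize = padpos & 3;                               /* e.g. 2  */
    if (padsize && skb->len > padpos + padsize) {
            memmove(skb->data + padsize, skb->data, padpos);
            skb_pull(skb, padsize);
    }
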
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf9491997..c484cc253892 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ 115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ 116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
117#define B43_MMIO_RNG 0x65A 117#define B43_MMIO_RNG 0x65A
118#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
118#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ 119#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
119#define B43_MMIO_IFSCTL_USE_EDCF 0x0004 120#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
120#define B43_MMIO_POWERUP_DELAY 0x6A8 121#define B43_MMIO_POWERUP_DELAY 0x6A8
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 027be275e035..88d1fd02d40a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,160 +383,44 @@ static inline
383 } 383 }
384} 384}
385 385
386/* Check if a DMA region fits the device constraints.
387 * Returns true, if the region is OK for usage with this device. */
388static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
389 dma_addr_t addr, size_t size)
390{
391 switch (ring->type) {
392 case B43_DMA_30BIT:
393 if ((u64)addr + size > (1ULL << 30))
394 return 0;
395 break;
396 case B43_DMA_32BIT:
397 if ((u64)addr + size > (1ULL << 32))
398 return 0;
399 break;
400 case B43_DMA_64BIT:
401 /* Currently we can't have addresses beyond
402 * 64bit in the kernel. */
403 break;
404 }
405 return 1;
406}
407
408#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
409#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
410
411static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
412 dma_addr_t dmaaddr, size_t size)
413{
414 ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
415 free_pages((unsigned long)base, get_order(size));
416}
417
418static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
419 dma_addr_t *dmaaddr, size_t size,
420 gfp_t gfp_flags)
421{
422 void *base;
423
424 base = (void *)__get_free_pages(gfp_flags, get_order(size));
425 if (!base)
426 return NULL;
427 memset(base, 0, size);
428 *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
429 DMA_TO_DEVICE);
430 if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
431 free_pages((unsigned long)base, get_order(size));
432 return NULL;
433 }
434
435 return base;
436}
437
438static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
439 dma_addr_t *dmaaddr, size_t size)
440{
441 void *base;
442
443 base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
444 GFP_KERNEL);
445 if (!base) {
446 b43err(ring->dev->wl, "Failed to allocate or map pages "
447 "for DMA ringmemory\n");
448 return NULL;
449 }
450 if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
451 /* The memory does not fit our device constraints.
452 * Retry with GFP_DMA set to get lower memory. */
453 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
454 base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
455 GFP_KERNEL | GFP_DMA);
456 if (!base) {
457 b43err(ring->dev->wl, "Failed to allocate or map pages "
458 "in the GFP_DMA region for DMA ringmemory\n");
459 return NULL;
460 }
461 if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
462 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
463 b43err(ring->dev->wl, "Failed to allocate DMA "
464 "ringmemory that fits device constraints\n");
465 return NULL;
466 }
467 }
468 /* We expect the memory to be 4k aligned, at least. */
469 if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
470 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
471 return NULL;
472 }
473
474 return base;
475}
476
477static int alloc_ringmemory(struct b43_dmaring *ring) 386static int alloc_ringmemory(struct b43_dmaring *ring)
478{ 387{
479 unsigned int required; 388 gfp_t flags = GFP_KERNEL;
480 void *base; 389
481 dma_addr_t dmaaddr; 390 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
482 391 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
483 /* There are several requirements to the descriptor ring memory: 392 * has shown that 4K is sufficient for the latter as long as the buffer
484 * - The memory region needs to fit the address constraints for the 393 * does not cross an 8K boundary.
485 * device (same as for frame buffers). 394 *
486 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned. 395 * For unknown reasons - possibly a hardware error - the BCM4311 rev
487 * - For 64bit DMA devices, the descriptor ring must be 8k aligned. 396 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
397 * which accounts for the GFP_DMA flag below.
398 *
399 * The flags here must match the flags in free_ringmemory below!
488 */ 400 */
489
490 if (ring->type == B43_DMA_64BIT) 401 if (ring->type == B43_DMA_64BIT)
491 required = ring->nr_slots * sizeof(struct b43_dmadesc64); 402 flags |= GFP_DMA;
492 else 403 ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
493 required = ring->nr_slots * sizeof(struct b43_dmadesc32); 404 B43_DMA_RINGMEMSIZE,
494 if (B43_WARN_ON(required > 0x1000)) 405 &(ring->dmabase), flags);
406 if (!ring->descbase) {
407 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
495 return -ENOMEM; 408 return -ENOMEM;
496
497 ring->alloc_descsize = 0x1000;
498 base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
499 if (!base)
500 return -ENOMEM;
501 ring->alloc_descbase = base;
502 ring->alloc_dmabase = dmaaddr;
503
504 if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
505 /* We're on <=32bit DMA, or we already got 8k aligned memory.
506 * That's all we need, so we're fine. */
507 ring->descbase = base;
508 ring->dmabase = dmaaddr;
509 return 0;
510 }
511 b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
512
513 /* Ok, we failed at the 8k alignment requirement.
514 * Try to force-align the memory region now. */
515 ring->alloc_descsize = 0x2000;
516 base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
517 if (!base)
518 return -ENOMEM;
519 ring->alloc_descbase = base;
520 ring->alloc_dmabase = dmaaddr;
521
522 if (is_8k_aligned(dmaaddr)) {
523 /* We're already 8k aligned. That Ok, too. */
524 ring->descbase = base;
525 ring->dmabase = dmaaddr;
526 return 0;
527 } 409 }
528 /* Force-align it to 8k */ 410 memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
529 ring->descbase = (void *)((u8 *)base + 0x1000);
530 ring->dmabase = dmaaddr + 0x1000;
531 B43_WARN_ON(!is_8k_aligned(ring->dmabase));
532 411
533 return 0; 412 return 0;
534} 413}
535 414
536static void free_ringmemory(struct b43_dmaring *ring) 415static void free_ringmemory(struct b43_dmaring *ring)
537{ 416{
538 b43_unmap_and_free_ringmem(ring, ring->alloc_descbase, 417 gfp_t flags = GFP_KERNEL;
539 ring->alloc_dmabase, ring->alloc_descsize); 418
419 if (ring->type == B43_DMA_64BIT)
420 flags |= GFP_DMA;
421
422 ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
423 ring->descbase, ring->dmabase, flags);
540} 424}
541 425
542/* Reset the RX DMA channel */ 426/* Reset the RX DMA channel */
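
With the revert to coherent allocations, the invariant worth calling out is the last line of the new comment: the GFP flags used at free time must match those used at allocation, since 64-bit DMA rings get GFP_DMA and the others do not. The paired calls, reduced to their essentials (identifiers as in the hunk above):

    gfp_t flags = GFP_KERNEL;

    if (ring->type == B43_DMA_64BIT)
            flags |= GFP_DMA;           /* keep the ring in low memory for BCM4311 rev 02 */

    ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
                                              &ring->dmabase, flags);
    /* ... */
    ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
                            ring->descbase, ring->dmabase, flags);
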
@@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
646 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) 530 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
647 return 1; 531 return 1;
648 532
649 if (!b43_dma_address_ok(ring, addr, buffersize)) { 533 switch (ring->type) {
650 /* We can't support this address. Unmap it again. */ 534 case B43_DMA_30BIT:
651 unmap_descbuffer(ring, addr, buffersize, dma_to_device); 535 if ((u64)addr + buffersize > (1ULL << 30))
652 return 1; 536 goto address_error;
537 break;
538 case B43_DMA_32BIT:
539 if ((u64)addr + buffersize > (1ULL << 32))
540 goto address_error;
541 break;
542 case B43_DMA_64BIT:
543 /* Currently we can't have addresses beyond
544 * 64bit in the kernel. */
545 break;
653 } 546 }
654 547
655 /* The address is OK. */ 548 /* The address is OK. */
656 return 0; 549 return 0;
550
551address_error:
552 /* We can't support this address. Unmap it again. */
553 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
554
555 return 1;
657} 556}
658 557
659static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) 558static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
715 meta->dmaaddr = dmaaddr; 614 meta->dmaaddr = dmaaddr;
716 ring->ops->fill_descriptor(ring, desc, dmaaddr, 615 ring->ops->fill_descriptor(ring, desc, dmaaddr,
717 ring->rx_buffersize, 0, 0, 0); 616 ring->rx_buffersize, 0, 0, 0);
718 ssb_dma_sync_single_for_device(ring->dev->dev,
719 ring->alloc_dmabase,
720 ring->alloc_descsize, DMA_TO_DEVICE);
721 617
722 return 0; 618 return 0;
723} 619}
@@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1354 } 1250 }
1355 /* Now transfer the whole frame. */ 1251 /* Now transfer the whole frame. */
1356 wmb(); 1252 wmb();
1357 ssb_dma_sync_single_for_device(ring->dev->dev,
1358 ring->alloc_dmabase,
1359 ring->alloc_descsize, DMA_TO_DEVICE);
1360 ops->poke_tx(ring, next_slot(ring, slot)); 1253 ops->poke_tx(ring, next_slot(ring, slot));
1361 return 0; 1254 return 0;
1362 1255
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index e607b392314c..f7ab37c4cdbc 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -157,6 +157,7 @@ struct b43_dmadesc_generic {
157} __attribute__ ((__packed__)); 157} __attribute__ ((__packed__));
158 158
159/* Misc DMA constants */ 159/* Misc DMA constants */
160#define B43_DMA_RINGMEMSIZE PAGE_SIZE
160#define B43_DMA0_RX_FRAMEOFFSET 30 161#define B43_DMA0_RX_FRAMEOFFSET 30
161 162
162/* DMA engine tuning knobs */ 163/* DMA engine tuning knobs */
@@ -246,12 +247,6 @@ struct b43_dmaring {
246 /* The QOS priority assigned to this ring. Only used for TX rings. 247 /* The QOS priority assigned to this ring. Only used for TX rings.
247 * This is the mac80211 "queue" value. */ 248 * This is the mac80211 "queue" value. */
248 u8 queue_prio; 249 u8 queue_prio;
249 /* Pointers and size of the originally allocated and mapped memory
250 * region for the descriptor ring. */
251 void *alloc_descbase;
252 dma_addr_t alloc_dmabase;
253 unsigned int alloc_descsize;
254 /* Pointer to our wireless device. */
255 struct b43_wldev *dev; 250 struct b43_wldev *dev;
256#ifdef CONFIG_B43_DEBUG 251#ifdef CONFIG_B43_DEBUG
257 /* Maximum number of used slots. */ 252 /* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f26..490fb45d1d05 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) 628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
629{ 629{
630 /* slot_time is in usec. */ 630 /* slot_time is in usec. */
631 if (dev->phy.type != B43_PHYTYPE_G) 631 /* This test used to exit for all but a G PHY. */
632 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
632 return; 633 return;
633 b43_write16(dev, 0x684, 510 + slot_time); 634 b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
634 b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); 635 /* Shared memory location 0x0010 is the slot time and should be
636 * set to slot_time; however, this register is initially 0 and changing
637 * the value adversely affects the transmit rate for BCM4311
638 * the value adversely affects the transmit rate for BCM4311 639 * devices. Until this behavior is understood, delete this step
639 *
640 * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
641 */
635} 642}
636 643
637static void b43_short_slot_timing_enable(struct b43_wldev *dev) 644static void b43_short_slot_timing_enable(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 7da1dab933d9..234891d8cc10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -681,19 +681,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
681 snr = rx_stats_sig_avg / rx_stats_noise_diff; 681 snr = rx_stats_sig_avg / rx_stats_noise_diff;
682 rx_status.noise = rx_status.signal - 682 rx_status.noise = rx_status.signal -
683 iwl3945_calc_db_from_ratio(snr); 683 iwl3945_calc_db_from_ratio(snr);
684 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
685 rx_status.noise);
686
687 /* If noise info not available, calculate signal quality indicator (%)
688 * using just the dBm signal level. */
689 } else { 684 } else {
690 rx_status.noise = priv->last_rx_noise; 685 rx_status.noise = priv->last_rx_noise;
691 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
692 } 686 }
693 687
694 688
695 IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", 689 IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
696 rx_status.signal, rx_status.noise, rx_status.qual, 690 rx_status.signal, rx_status.noise,
697 rx_stats_sig_avg, rx_stats_noise_diff); 691 rx_stats_sig_avg, rx_stats_noise_diff);
698 692
699 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 693 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -1835,8 +1829,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1835 rc = -EIO; 1829 rc = -EIO;
1836 } 1830 }
1837 1831
1838 priv->alloc_rxb_page--; 1832 iwl_free_pages(priv, cmd.reply_page);
1839 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
1840 1833
1841 return rc; 1834 return rc;
1842} 1835}
@@ -2836,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2836 .use_isr_legacy = true, 2829 .use_isr_legacy = true,
2837 .ht_greenfield_support = false, 2830 .ht_greenfield_support = false,
2838 .led_compensation = 64, 2831 .led_compensation = 64,
2832 .broken_powersave = true,
2839}; 2833};
2840 2834
2841static struct iwl_cfg iwl3945_abg_cfg = { 2835static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2852,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2852 .use_isr_legacy = true, 2846 .use_isr_legacy = true,
2853 .ht_greenfield_support = false, 2847 .ht_greenfield_support = false,
2854 .led_compensation = 64, 2848 .led_compensation = 64,
2849 .broken_powersave = true,
2855}; 2850};
2856 2851
2857struct pci_device_id iwl3945_hw_card_ids[] = { 2852struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index ecc23ec1f6a4..531fa125f5a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -222,7 +222,6 @@ struct iwl3945_ibss_seq {
222 * 222 *
223 *****************************************************************************/ 223 *****************************************************************************/
224extern int iwl3945_calc_db_from_ratio(int sig_ratio); 224extern int iwl3945_calc_db_from_ratio(int sig_ratio);
225extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
226extern void iwl3945_rx_replenish(void *data); 225extern void iwl3945_rx_replenish(void *data);
227extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 226extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
228extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 227extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 386513b601f5..9b4b8b5c7574 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1204,7 +1204,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1204 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); 1204 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1205 1205
1206 /* calculate tx gain adjustment based on power supply voltage */ 1206 /* calculate tx gain adjustment based on power supply voltage */
1207 voltage = priv->calib_info->voltage; 1207 voltage = le16_to_cpu(priv->calib_info->voltage);
1208 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); 1208 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1209 voltage_compensation = 1209 voltage_compensation =
1210 iwl4965_get_voltage_compensation(voltage, init_voltage); 1210 iwl4965_get_voltage_compensation(voltage, init_voltage);
@@ -1961,7 +1961,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
1961 struct ieee80211_tx_info *info; 1961 struct ieee80211_tx_info *info;
1962 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 1962 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1963 u32 status = le32_to_cpu(tx_resp->u.status); 1963 u32 status = le32_to_cpu(tx_resp->u.status);
1964 int tid = MAX_TID_COUNT; 1964 int uninitialized_var(tid);
1965 int sta_id; 1965 int sta_id;
1966 int freed; 1966 int freed;
1967 u8 *qc = NULL; 1967 u8 *qc = NULL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 4ef6804a455a..bc056e9ab85f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -92,11 +92,15 @@
92 92
93static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) 93static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
94{ 94{
95 u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv, 95 u16 temperature, voltage;
96 EEPROM_5000_TEMPERATURE); 96 __le16 *temp_calib =
97 /* offset = temperature - voltage / coef */ 97 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
98 s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); 98
99 return offset; 99 temperature = le16_to_cpu(temp_calib[0]);
100 voltage = le16_to_cpu(temp_calib[1]);
101
102 /* offset = temp - volt / coeff */
103 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
100} 104}
101 105
102/* Fixed (non-configurable) rx data from phy */ 106/* Fixed (non-configurable) rx data from phy */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e2f8615c8c9b..de45f308b744 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -333,14 +333,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
333static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) 333static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
334{ 334{
335 struct iwl_calib_xtal_freq_cmd cmd; 335 struct iwl_calib_xtal_freq_cmd cmd;
336 u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); 336 __le16 *xtal_calib =
337 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
337 338
338 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; 339 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
339 cmd.hdr.first_group = 0; 340 cmd.hdr.first_group = 0;
340 cmd.hdr.groups_num = 1; 341 cmd.hdr.groups_num = 1;
341 cmd.hdr.data_valid = 1; 342 cmd.hdr.data_valid = 1;
342 cmd.cap_pin1 = (u8)xtal_calib[0]; 343 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
343 cmd.cap_pin2 = (u8)xtal_calib[1]; 344 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
344 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], 345 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
345 (u8 *)&cmd, sizeof(cmd)); 346 (u8 *)&cmd, sizeof(cmd));
346} 347}
@@ -1597,6 +1598,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
1597 .use_bsm = false, 1598 .use_bsm = false,
1598 .ht_greenfield_support = true, 1599 .ht_greenfield_support = true,
1599 .led_compensation = 51, 1600 .led_compensation = 51,
1601 .use_rts_for_ht = true, /* use rts/cts protection */
1600 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1602 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1601 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1603 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1602}; 1604};
@@ -1621,6 +1623,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
1621 .use_bsm = false, 1623 .use_bsm = false,
1622 .ht_greenfield_support = true, 1624 .ht_greenfield_support = true,
1623 .led_compensation = 51, 1625 .led_compensation = 51,
1626 .use_rts_for_ht = true, /* use rts/cts protection */
1624 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1627 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1625}; 1628};
1626 1629
@@ -1666,6 +1669,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
1666 .use_bsm = false, 1669 .use_bsm = false,
1667 .ht_greenfield_support = true, 1670 .ht_greenfield_support = true,
1668 .led_compensation = 51, 1671 .led_compensation = 51,
1672 .use_rts_for_ht = true, /* use rts/cts protection */
1669 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1673 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1670 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1674 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1671}; 1675};
@@ -1690,6 +1694,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
1690 .use_bsm = false, 1694 .use_bsm = false,
1691 .ht_greenfield_support = true, 1695 .ht_greenfield_support = true,
1692 .led_compensation = 51, 1696 .led_compensation = 51,
1697 .use_rts_for_ht = true, /* use rts/cts protection */
1693 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1698 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1694 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1699 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1695}; 1700};
@@ -1714,6 +1719,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
1714 .use_bsm = false, 1719 .use_bsm = false,
1715 .ht_greenfield_support = true, 1720 .ht_greenfield_support = true,
1716 .led_compensation = 51, 1721 .led_compensation = 51,
1722 .use_rts_for_ht = true, /* use rts/cts protection */
1717 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1723 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1718 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1724 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1719}; 1725};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index fe511cbf012e..b93e49158196 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -150,7 +150,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
150}; 150};
151 151
152/* mbps, mcs */ 152/* mbps, mcs */
153const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { 153static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
154 { "1", "BPSK DSSS"}, 154 { "1", "BPSK DSSS"},
155 { "2", "QPSK DSSS"}, 155 { "2", "QPSK DSSS"},
156 {"5.5", "BPSK CCK"}, 156 {"5.5", "BPSK CCK"},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b8377efb3ba7..1c9866daf815 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1842,7 +1842,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1842 } 1842 }
1843 1843
1844#ifdef CONFIG_IWLWIFI_DEBUG 1844#ifdef CONFIG_IWLWIFI_DEBUG
1845 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)) 1845 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1846 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) 1846 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1847 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; 1847 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1848#else 1848#else
@@ -3173,7 +3173,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
3173 3173
3174 priv->ibss_beacon = NULL; 3174 priv->ibss_beacon = NULL;
3175 3175
3176 spin_lock_init(&priv->lock);
3177 spin_lock_init(&priv->sta_lock); 3176 spin_lock_init(&priv->sta_lock);
3178 spin_lock_init(&priv->hcmd_lock); 3177 spin_lock_init(&priv->hcmd_lock);
3179 3178
@@ -3361,10 +3360,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3361 (unsigned long long) pci_resource_len(pdev, 0)); 3360 (unsigned long long) pci_resource_len(pdev, 0));
3362 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); 3361 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3363 3362
3364 /* this spin lock will be used in apm_ops.init and EEPROM access 3363 /* these spin locks will be used in apm_ops.init and EEPROM access
3365 * we should init now 3364 * we should init now
3366 */ 3365 */
3367 spin_lock_init(&priv->reg_lock); 3366 spin_lock_init(&priv->reg_lock);
3367 spin_lock_init(&priv->lock);
3368 iwl_hw_detect(priv); 3368 iwl_hw_detect(priv);
3369 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", 3369 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
3370 priv->cfg->name, priv->hw_rev); 3370 priv->cfg->name, priv->hw_rev);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 574d36658702..d10bea64fce3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2344,6 +2344,21 @@ static void iwl_ht_conf(struct iwl_priv *priv,
2344 IWL_DEBUG_MAC80211(priv, "leave\n"); 2344 IWL_DEBUG_MAC80211(priv, "leave\n");
2345} 2345}
2346 2346
2347static inline void iwl_set_no_assoc(struct iwl_priv *priv)
2348{
2349 priv->assoc_id = 0;
2350 iwl_led_disassociate(priv);
2351 /*
2352 * inform the ucode that there is no longer an
2353 * association and that no more packets should be
2354 * sent
2355 */
2356 priv->staging_rxon.filter_flags &=
2357 ~RXON_FILTER_ASSOC_MSK;
2358 priv->staging_rxon.assoc_id = 0;
2359 iwlcore_commit_rxon(priv);
2360}
2361
2347#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 2362#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
2348void iwl_bss_info_changed(struct ieee80211_hw *hw, 2363void iwl_bss_info_changed(struct ieee80211_hw *hw,
2349 struct ieee80211_vif *vif, 2364 struct ieee80211_vif *vif,
@@ -2475,20 +2490,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2475 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; 2490 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
2476 if (!iwl_is_rfkill(priv)) 2491 if (!iwl_is_rfkill(priv))
2477 priv->cfg->ops->lib->post_associate(priv); 2492 priv->cfg->ops->lib->post_associate(priv);
2478 } else { 2493 } else
2479 priv->assoc_id = 0; 2494 iwl_set_no_assoc(priv);
2480 iwl_led_disassociate(priv);
2481
2482 /*
2483 * inform the ucode that there is no longer an
2484 * association and that no more packets should be
2485 * send
2486 */
2487 priv->staging_rxon.filter_flags &=
2488 ~RXON_FILTER_ASSOC_MSK;
2489 priv->staging_rxon.assoc_id = 0;
2490 iwlcore_commit_rxon(priv);
2491 }
2492 } 2495 }
2493 2496
2494 if (changes && iwl_is_associated(priv) && priv->assoc_id) { 2497 if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2503,12 +2506,14 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2503 } 2506 }
2504 } 2507 }
2505 2508
2506 if ((changes & BSS_CHANGED_BEACON_ENABLED) && 2509 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2507 vif->bss_conf.enable_beacon) { 2510 if (vif->bss_conf.enable_beacon) {
2508 memcpy(priv->staging_rxon.bssid_addr, 2511 memcpy(priv->staging_rxon.bssid_addr,
2509 bss_conf->bssid, ETH_ALEN); 2512 bss_conf->bssid, ETH_ALEN);
2510 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); 2513 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2511 iwlcore_config_ap(priv); 2514 iwlcore_config_ap(priv);
2515 } else
2516 iwl_set_no_assoc(priv);
2512 } 2517 }
2513 2518
2514 mutex_unlock(&priv->mutex); 2519 mutex_unlock(&priv->mutex);
@@ -2739,6 +2744,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2739 if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) 2744 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2740 priv->staging_rxon.flags = 0; 2745 priv->staging_rxon.flags = 0;
2741 2746
2747 iwl_set_rxon_ht(priv, ht_conf);
2742 iwl_set_rxon_channel(priv, conf->channel); 2748 iwl_set_rxon_channel(priv, conf->channel);
2743 2749
2744 iwl_set_flags_for_band(priv, conf->channel->band); 2750 iwl_set_flags_for_band(priv, conf->channel->band);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a7bfae01f19b..1ec8cb4d5eae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -77,8 +77,7 @@
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing 77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers. 78 * the CSR registers.
79 * 79 *
80 * NOTE: Newer devices using one-time-programmable (OTP) memory 80 * NOTE: Device does need to be awake in order to read this memory
81 * require device to be awake in order to read this memory
82 * via CSR_EEPROM and CSR_OTP registers 81 * via CSR_EEPROM and CSR_OTP registers
83 */ 82 */
84#define CSR_BASE (0x000) 83#define CSR_BASE (0x000)
@@ -111,9 +110,8 @@
111/* 110/*
112 * EEPROM and OTP (one-time-programmable) memory reads 111 * EEPROM and OTP (one-time-programmable) memory reads
113 * 112 *
114 * NOTE: For (newer) devices using OTP, device must be awake, initialized via 113 * NOTE: Device must be awake, initialized via apm_ops.init(),
115 * apm_ops.init() in order to read. Older devices (3945/4965/5000) 114 * in order to read.
116 * use EEPROM and do not require this.
117 */ 115 */
118#define CSR_EEPROM_REG (CSR_BASE+0x02c) 116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
119#define CSR_EEPROM_GP (CSR_BASE+0x030) 117#define CSR_EEPROM_GP (CSR_BASE+0x030)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2673e9a4db92..3822cf53e368 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -711,7 +711,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
711extern int iwl_queue_space(const struct iwl_queue *q); 711extern int iwl_queue_space(const struct iwl_queue *q);
712static inline int iwl_queue_used(const struct iwl_queue *q, int i) 712static inline int iwl_queue_used(const struct iwl_queue *q, int i)
713{ 713{
714 return q->write_ptr > q->read_ptr ? 714 return q->write_ptr >= q->read_ptr ?
715 (i >= q->read_ptr && i < q->write_ptr) : 715 (i >= q->read_ptr && i < q->write_ptr) :
716 !(i < q->read_ptr && i >= q->write_ptr); 716 !(i < q->read_ptr && i >= q->write_ptr);
717} 717}
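
The comparison change matters exactly when the queue is empty (write_ptr == read_ptr): the old `>` sent that case down the wrap-around branch, which reports every slot as used, while `>=` keeps it on the linear branch, which correctly reports none. Checked with small numbers (illustration only):

    /* write_ptr == read_ptr == 5, probing slot i == 2                              */
    /* old: 5 > 5  is false -> !(2 < 5 && 2 >= 5) -> true   (slot wrongly "used")   */
    /* new: 5 >= 5 is true  ->  (2 >= 5 && 2 < 5) -> false  (slot correctly free)   */
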
@@ -1168,7 +1168,7 @@ struct iwl_priv {
1168 u32 last_beacon_time; 1168 u32 last_beacon_time;
1169 u64 last_tsf; 1169 u64 last_tsf;
1170 1170
1171 /* eeprom */ 1171 /* eeprom -- this is in the card's little endian byte order */
1172 u8 *eeprom; 1172 u8 *eeprom;
1173 int nvm_device_type; 1173 int nvm_device_type;
1174 struct iwl_eeprom_calib_info *calib_info; 1174 struct iwl_eeprom_calib_info *calib_info;
@@ -1353,4 +1353,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1353 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1353 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1354} 1354}
1355 1355
1356static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
1357{
1358 __free_pages(page, priv->hw_params.rx_page_order);
1359 priv->alloc_rxb_page--;
1360}
1361
1362static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
1363{
1364 free_pages(page, priv->hw_params.rx_page_order);
1365 priv->alloc_rxb_page--;
1366}
1356#endif /* __iwl_dev_h__ */ 1367#endif /* __iwl_dev_h__ */
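
The two inline helpers keep the alloc_rxb_page accounting next to the page free, so the call sites patched elsewhere (iwl-hcmd.c and iwl-rx.c below, iwl-3945.c above) shrink from an open-coded pair to a single call:

    /* before */
    priv->alloc_rxb_page--;
    free_pages(cmd.reply_page, priv->hw_params.rx_page_order);

    /* after */
    iwl_free_pages(priv, cmd.reply_page);
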
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index e7d88d1da15d..83cc4e500a96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,3 +1,29 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
1#include <linux/module.h> 27#include <linux/module.h>
2 28
3/* sparse doesn't like tracepoint macros */ 29/* sparse doesn't like tracepoint macros */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 21361968ab7e..d9c7363b1bbb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,3 +1,29 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
1#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) 27#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
2#define __IWLWIFI_DEVICE_TRACE 28#define __IWLWIFI_DEVICE_TRACE
3 29
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 3946e5c03f81..4a30969689ff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -370,7 +370,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
370 return ret; 370 return ret;
371} 371}
372 372
373static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data) 373static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
374{ 374{
375 int ret = 0; 375 int ret = 0;
376 u32 r; 376 u32 r;
@@ -404,7 +404,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
404 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); 404 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
405 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); 405 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
406 } 406 }
407 *eeprom_data = le16_to_cpu((__force __le16)(r >> 16)); 407 *eeprom_data = cpu_to_le16(r >> 16);
408 return 0; 408 return 0;
409} 409}
410 410
@@ -413,7 +413,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
413 */ 413 */
414static bool iwl_is_otp_empty(struct iwl_priv *priv) 414static bool iwl_is_otp_empty(struct iwl_priv *priv)
415{ 415{
416 u16 next_link_addr = 0, link_value; 416 u16 next_link_addr = 0;
417 __le16 link_value;
417 bool is_empty = false; 418 bool is_empty = false;
418 419
419 /* locate the beginning of OTP link list */ 420 /* locate the beginning of OTP link list */
@@ -443,7 +444,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
443static int iwl_find_otp_image(struct iwl_priv *priv, 444static int iwl_find_otp_image(struct iwl_priv *priv,
444 u16 *validblockaddr) 445 u16 *validblockaddr)
445{ 446{
446 u16 next_link_addr = 0, link_value = 0, valid_addr; 447 u16 next_link_addr = 0, valid_addr;
448 __le16 link_value = 0;
447 int usedblocks = 0; 449 int usedblocks = 0;
448 450
449 /* set addressing mode to absolute to traverse the link list */ 451 /* set addressing mode to absolute to traverse the link list */
@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
463 * check for more block on the link list 465 * check for more block on the link list
464 */ 466 */
465 valid_addr = next_link_addr; 467 valid_addr = next_link_addr;
466 next_link_addr = link_value * sizeof(u16); 468 next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
467 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", 469 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
468 usedblocks, next_link_addr); 470 usedblocks, next_link_addr);
469 if (iwl_read_otp_word(priv, next_link_addr, &link_value)) 471 if (iwl_read_otp_word(priv, next_link_addr, &link_value))
@@ -497,7 +499,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
497 */ 499 */
498int iwl_eeprom_init(struct iwl_priv *priv) 500int iwl_eeprom_init(struct iwl_priv *priv)
499{ 501{
500 u16 *e; 502 __le16 *e;
501 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 503 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
502 int sz; 504 int sz;
503 int ret; 505 int ret;
@@ -516,12 +518,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
516 ret = -ENOMEM; 518 ret = -ENOMEM;
517 goto alloc_err; 519 goto alloc_err;
518 } 520 }
519 e = (u16 *)priv->eeprom; 521 e = (__le16 *)priv->eeprom;
520 522
521 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { 523 priv->cfg->ops->lib->apm_ops.init(priv);
522 /* OTP reads require powered-up chip */
523 priv->cfg->ops->lib->apm_ops.init(priv);
524 }
525 524
526 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); 525 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
527 if (ret < 0) { 526 if (ret < 0) {
@@ -562,7 +561,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
562 } 561 }
563 for (addr = validblockaddr; addr < validblockaddr + sz; 562 for (addr = validblockaddr; addr < validblockaddr + sz;
564 addr += sizeof(u16)) { 563 addr += sizeof(u16)) {
565 u16 eeprom_data; 564 __le16 eeprom_data;
566 565
567 ret = iwl_read_otp_word(priv, addr, &eeprom_data); 566 ret = iwl_read_otp_word(priv, addr, &eeprom_data);
568 if (ret) 567 if (ret)
@@ -570,13 +569,6 @@ int iwl_eeprom_init(struct iwl_priv *priv)
570 e[cache_addr / 2] = eeprom_data; 569 e[cache_addr / 2] = eeprom_data;
571 cache_addr += sizeof(u16); 570 cache_addr += sizeof(u16);
572 } 571 }
573
574 /*
575 * Now that OTP reads are complete, reset chip to save
576 * power until we load uCode during "up".
577 */
578 priv->cfg->ops->lib->apm_ops.stop(priv);
579
580 } else { 572 } else {
581 /* eeprom is an array of 16bit values */ 573 /* eeprom is an array of 16bit values */
582 for (addr = 0; addr < sz; addr += sizeof(u16)) { 574 for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -594,7 +586,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
594 goto done; 586 goto done;
595 } 587 }
596 r = _iwl_read_direct32(priv, CSR_EEPROM_REG); 588 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
597 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 589 e[addr / 2] = cpu_to_le16(r >> 16);
598 } 590 }
599 } 591 }
600 ret = 0; 592 ret = 0;
@@ -603,6 +595,8 @@ done:
603err: 595err:
604 if (ret) 596 if (ret)
605 iwl_eeprom_free(priv); 597 iwl_eeprom_free(priv);
598 /* Reset chip to save power until we load uCode during "up". */
599 priv->cfg->ops->lib->apm_ops.stop(priv);
606alloc_err: 600alloc_err:
607 return ret; 601 return ret;
608} 602}
@@ -755,7 +749,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
755 ch_info->ht40_eeprom = *eeprom_ch; 749 ch_info->ht40_eeprom = *eeprom_ch;
756 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; 750 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
757 ch_info->ht40_flags = eeprom_ch->flags; 751 ch_info->ht40_flags = eeprom_ch->flags;
758 ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; 752 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
753 ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
759 754
760 return 0; 755 return 0;
761} 756}
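Note: the iwl-eeprom.c hunks above switch the cached image to __le16 and pass the OTP link word through le16_to_cpu() before using it as a byte offset, so the link-list walk behaves the same on big- and little-endian hosts. A minimal userspace model of that walk, a sketch only, with le16_to_host() standing in for the kernel's le16_to_cpu():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_to_host(const uint8_t *p)
{
	/* stand-in for le16_to_cpu(): bytes are stored least-significant first */
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* fake OTP image: word 0 links to word 3, stored little-endian */
	uint8_t otp[] = { 0x03, 0x00,  0xaa, 0xbb,  0xcc, 0xdd,  0x34, 0x12 };
	size_t next_link_addr;

	/* without the conversion a big-endian host would compute 0x0300 * 2 */
	next_link_addr = (size_t)le16_to_host(&otp[0]) * sizeof(uint16_t);
	printf("next block at byte offset %zu\n", next_link_addr);   /* 6 */
	printf("word there: 0x%04x\n", le16_to_host(&otp[next_link_addr]));
	return 0;
}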
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 5cd2b66bbe45..0cd9c02ee044 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -137,7 +137,7 @@ struct iwl_eeprom_channel {
137 * 137 *
138 */ 138 */
139struct iwl_eeprom_enhanced_txpwr { 139struct iwl_eeprom_enhanced_txpwr {
140 u16 common; 140 __le16 common;
141 s8 chain_a_max; 141 s8 chain_a_max;
142 s8 chain_b_max; 142 s8 chain_b_max;
143 s8 chain_c_max; 143 s8 chain_c_max;
@@ -360,7 +360,7 @@ struct iwl_eeprom_calib_subband_info {
360struct iwl_eeprom_calib_info { 360struct iwl_eeprom_calib_info {
361 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ 361 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
362 u8 saturation_power52; /* half-dBm */ 362 u8 saturation_power52; /* half-dBm */
363 s16 voltage; /* signed */ 363 __le16 voltage; /* signed */
364 struct iwl_eeprom_calib_subband_info 364 struct iwl_eeprom_calib_subband_info
365 band_info[EEPROM_TX_POWER_BANDS]; 365 band_info[EEPROM_TX_POWER_BANDS];
366} __attribute__ ((packed)); 366} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index a23165948202..30e9ea6d54ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -234,7 +234,7 @@ cancel:
234 } 234 }
235fail: 235fail:
236 if (cmd->reply_page) { 236 if (cmd->reply_page) {
237 free_pages(cmd->reply_page, priv->hw_params.rx_page_order); 237 iwl_free_pages(priv, cmd->reply_page);
238 cmd->reply_page = 0; 238 cmd->reply_page = 0;
239 } 239 }
240out: 240out:
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6090bc15a6d5..2dbce85404aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -345,10 +345,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
345 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 345 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
346 PAGE_SIZE << priv->hw_params.rx_page_order, 346 PAGE_SIZE << priv->hw_params.rx_page_order,
347 PCI_DMA_FROMDEVICE); 347 PCI_DMA_FROMDEVICE);
348 __free_pages(rxq->pool[i].page, 348 __iwl_free_pages(priv, rxq->pool[i].page);
349 priv->hw_params.rx_page_order);
350 rxq->pool[i].page = NULL; 349 rxq->pool[i].page = NULL;
351 priv->alloc_rxb_page--;
352 } 350 }
353 } 351 }
354 352
@@ -416,9 +414,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
416 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 414 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
417 PAGE_SIZE << priv->hw_params.rx_page_order, 415 PAGE_SIZE << priv->hw_params.rx_page_order,
418 PCI_DMA_FROMDEVICE); 416 PCI_DMA_FROMDEVICE);
419 priv->alloc_rxb_page--; 417 __iwl_free_pages(priv, rxq->pool[i].page);
420 __free_pages(rxq->pool[i].page,
421 priv->hw_params.rx_page_order);
422 rxq->pool[i].page = NULL; 418 rxq->pool[i].page = NULL;
423 } 419 }
424 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 420 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -654,47 +650,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
654} 650}
655EXPORT_SYMBOL(iwl_reply_statistics); 651EXPORT_SYMBOL(iwl_reply_statistics);
656 652
657#define PERFECT_RSSI (-20) /* dBm */
658#define WORST_RSSI (-95) /* dBm */
659#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
660
661/* Calculate an indication of rx signal quality (a percentage, not dBm!).
662 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
663 * about formulas used below. */
664static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
665{
666 int sig_qual;
667 int degradation = PERFECT_RSSI - rssi_dbm;
668
669 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
670 * as indicator; formula is (signal dbm - noise dbm).
671 * SNR at or above 40 is a great signal (100%).
672 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
673 * Weakest usable signal is usually 10 - 15 dB SNR. */
674 if (noise_dbm) {
675 if (rssi_dbm - noise_dbm >= 40)
676 return 100;
677 else if (rssi_dbm < noise_dbm)
678 return 0;
679 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
680
681 /* Else use just the signal level.
682 * This formula is a least squares fit of data points collected and
683 * compared with a reference system that had a percentage (%) display
684 * for signal quality. */
685 } else
686 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
687 (15 * RSSI_RANGE + 62 * degradation)) /
688 (RSSI_RANGE * RSSI_RANGE);
689
690 if (sig_qual > 100)
691 sig_qual = 100;
692 else if (sig_qual < 1)
693 sig_qual = 0;
694
695 return sig_qual;
696}
697
698/* Calc max signal level (dBm) among 3 possible receivers */ 653/* Calc max signal level (dBm) among 3 possible receivers */
699static inline int iwl_calc_rssi(struct iwl_priv *priv, 654static inline int iwl_calc_rssi(struct iwl_priv *priv,
700 struct iwl_rx_phy_res *rx_resp) 655 struct iwl_rx_phy_res *rx_resp)
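Note: the hunk above (and the matching one in iwl3945-base.c further down) deletes the driver's RSSI-to-"quality" heuristic, apparently because the rx qual value is no longer consumed; the same field is dropped from several other drivers later in this diff. For reference, a self-contained restatement of the deleted arithmetic with two worked values, constants copied from the removed code:

#include <stdio.h>

#define PERFECT_RSSI (-20)                      /* dBm */
#define WORST_RSSI   (-95)                      /* dBm */
#define RSSI_RANGE   (PERFECT_RSSI - WORST_RSSI)

static int calc_sig_qual(int rssi_dbm, int noise_dbm)
{
	int sig_qual;
	int degradation = PERFECT_RSSI - rssi_dbm;

	if (noise_dbm) {
		/* SNR path: 40 dB or better is 100%, scaled linearly below that */
		if (rssi_dbm - noise_dbm >= 40)
			return 100;
		if (rssi_dbm < noise_dbm)
			return 0;
		sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
	} else {
		/* signal-only path: the least-squares fit from the original comment */
		sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) -
			    degradation * (15 * RSSI_RANGE + 62 * degradation)) /
			   (RSSI_RANGE * RSSI_RANGE);
	}

	if (sig_qual > 100)
		sig_qual = 100;
	else if (sig_qual < 1)
		sig_qual = 0;
	return sig_qual;
}

int main(void)
{
	printf("%d%%\n", calc_sig_qual(-60, -85));  /* 25 dB SNR      -> 62 */
	printf("%d%%\n", calc_sig_qual(-60, 0));    /* no noise value -> 74 */
	return 0;
}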
@@ -973,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
973 if (ieee80211_is_mgmt(fc) || 928 if (ieee80211_is_mgmt(fc) ||
974 ieee80211_has_protected(fc) || 929 ieee80211_has_protected(fc) ||
975 ieee80211_has_morefrags(fc) || 930 ieee80211_has_morefrags(fc) ||
976 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) 931 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
932 (ieee80211_is_data_qos(fc) &&
933 *ieee80211_get_qos_ctl(hdr) &
934 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
977 ret = skb_linearize(skb); 935 ret = skb_linearize(skb);
978 else 936 else
979 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? 937 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
@@ -1105,11 +1063,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1105 if (iwl_is_associated(priv) && 1063 if (iwl_is_associated(priv) &&
1106 !test_bit(STATUS_SCANNING, &priv->status)) { 1064 !test_bit(STATUS_SCANNING, &priv->status)) {
1107 rx_status.noise = priv->last_rx_noise; 1065 rx_status.noise = priv->last_rx_noise;
1108 rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
1109 rx_status.noise);
1110 } else { 1066 } else {
1111 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 1067 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1112 rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
1113 } 1068 }
1114 1069
1115 /* Reset beacon noise level if not associated. */ 1070 /* Reset beacon noise level if not associated. */
@@ -1122,8 +1077,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1122 iwl_dbg_report_frame(priv, phy_res, len, header, 1); 1077 iwl_dbg_report_frame(priv, phy_res, len, header, 1);
1123#endif 1078#endif
1124 iwl_dbg_log_rx_data_frame(priv, len, header); 1079 iwl_dbg_log_rx_data_frame(priv, len, header);
1125 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n", 1080 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
1126 rx_status.signal, rx_status.noise, rx_status.qual, 1081 rx_status.signal, rx_status.noise,
1127 (unsigned long long)rx_status.mactime); 1082 (unsigned long long)rx_status.mactime);
1128 1083
1129 /* 1084 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a2b2b8315ff9..fa1c89ba6459 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -144,8 +144,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
144 clear_bit(STATUS_SCAN_HW, &priv->status); 144 clear_bit(STATUS_SCAN_HW, &priv->status);
145 } 145 }
146 146
147 priv->alloc_rxb_page--; 147 iwl_free_pages(priv, cmd.reply_page);
148 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
149 148
150 return ret; 149 return ret;
151} 150}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index cd6a6901216e..90fbdb25399e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -164,9 +164,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
164 break; 164 break;
165 } 165 }
166 } 166 }
167 167 iwl_free_pages(priv, cmd.reply_page);
168 priv->alloc_rxb_page--;
169 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
170 168
171 return ret; 169 return ret;
172} 170}
@@ -299,7 +297,7 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
299} 297}
300EXPORT_SYMBOL(iwl_add_station); 298EXPORT_SYMBOL(iwl_add_station);
301 299
302static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr) 300static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
303{ 301{
304 unsigned long flags; 302 unsigned long flags;
305 u8 sta_id = iwl_find_station(priv, addr); 303 u8 sta_id = iwl_find_station(priv, addr);
@@ -326,7 +324,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
326{ 324{
327 struct iwl_rem_sta_cmd *rm_sta = 325 struct iwl_rem_sta_cmd *rm_sta =
328 (struct iwl_rem_sta_cmd *)cmd->cmd.payload; 326 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
329 const char *addr = rm_sta->addr; 327 const u8 *addr = rm_sta->addr;
330 328
331 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 329 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
332 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 330 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
@@ -391,9 +389,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
391 break; 389 break;
392 } 390 }
393 } 391 }
394 392 iwl_free_pages(priv, cmd.reply_page);
395 priv->alloc_rxb_page--;
396 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
397 393
398 return ret; 394 return ret;
399} 395}
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 00da5e152d46..87ce2bd292c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -407,13 +407,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
407 int txq_id; 407 int txq_id;
408 408
409 /* Tx queues */ 409 /* Tx queues */
410 if (priv->txq) 410 if (priv->txq) {
411 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 411 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
412 txq_id++) 412 txq_id++)
413 if (txq_id == IWL_CMD_QUEUE_NUM) 413 if (txq_id == IWL_CMD_QUEUE_NUM)
414 iwl_cmd_queue_free(priv); 414 iwl_cmd_queue_free(priv);
415 else 415 else
416 iwl_tx_queue_free(priv, txq_id); 416 iwl_tx_queue_free(priv, txq_id);
417 }
417 iwl_free_dma_ptr(priv, &priv->kw); 418 iwl_free_dma_ptr(priv, &priv->kw);
418 419
419 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); 420 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 2a28a1f8b1fe..f8e4e4b18d02 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
548 txq = &priv->txq[txq_id]; 548 txq = &priv->txq[txq_id];
549 q = &txq->q; 549 q = &txq->q;
550 550
551 if ((iwl_queue_space(q) < q->high_mark))
552 goto drop;
553
551 spin_lock_irqsave(&priv->lock, flags); 554 spin_lock_irqsave(&priv->lock, flags);
552 555
553 idx = get_cmd_index(q, q->write_ptr, 0); 556 idx = get_cmd_index(q, q->write_ptr, 0);
@@ -812,7 +815,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
812 break; 815 break;
813 } 816 }
814 817
815 free_pages(cmd.reply_page, priv->hw_params.rx_page_order); 818 iwl_free_pages(priv, cmd.reply_page);
816 819
817 return rc; 820 return rc;
818} 821}
@@ -1198,9 +1201,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1198 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1201 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1199 PAGE_SIZE << priv->hw_params.rx_page_order, 1202 PAGE_SIZE << priv->hw_params.rx_page_order,
1200 PCI_DMA_FROMDEVICE); 1203 PCI_DMA_FROMDEVICE);
1201 priv->alloc_rxb_page--; 1204 __iwl_free_pages(priv, rxq->pool[i].page);
1202 __free_pages(rxq->pool[i].page,
1203 priv->hw_params.rx_page_order);
1204 rxq->pool[i].page = NULL; 1205 rxq->pool[i].page = NULL;
1205 } 1206 }
1206 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1207 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1247,10 +1248,8 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1247 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1248 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1248 PAGE_SIZE << priv->hw_params.rx_page_order, 1249 PAGE_SIZE << priv->hw_params.rx_page_order,
1249 PCI_DMA_FROMDEVICE); 1250 PCI_DMA_FROMDEVICE);
1250 __free_pages(rxq->pool[i].page, 1251 __iwl_free_pages(priv, rxq->pool[i].page);
1251 priv->hw_params.rx_page_order);
1252 rxq->pool[i].page = NULL; 1252 rxq->pool[i].page = NULL;
1253 priv->alloc_rxb_page--;
1254 } 1253 }
1255 } 1254 }
1256 1255
@@ -1300,47 +1299,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
1300 return (int)ratio2dB[sig_ratio]; 1299 return (int)ratio2dB[sig_ratio];
1301} 1300}
1302 1301
1303#define PERFECT_RSSI (-20) /* dBm */
1304#define WORST_RSSI (-95) /* dBm */
1305#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
1306
1307/* Calculate an indication of rx signal quality (a percentage, not dBm!).
1308 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
1309 * about formulas used below. */
1310int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
1311{
1312 int sig_qual;
1313 int degradation = PERFECT_RSSI - rssi_dbm;
1314
1315 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
1316 * as indicator; formula is (signal dbm - noise dbm).
1317 * SNR at or above 40 is a great signal (100%).
1318 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
1319 * Weakest usable signal is usually 10 - 15 dB SNR. */
1320 if (noise_dbm) {
1321 if (rssi_dbm - noise_dbm >= 40)
1322 return 100;
1323 else if (rssi_dbm < noise_dbm)
1324 return 0;
1325 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
1326
1327 /* Else use just the signal level.
1328 * This formula is a least squares fit of data points collected and
1329 * compared with a reference system that had a percentage (%) display
1330 * for signal quality. */
1331 } else
1332 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
1333 (15 * RSSI_RANGE + 62 * degradation)) /
1334 (RSSI_RANGE * RSSI_RANGE);
1335
1336 if (sig_qual > 100)
1337 sig_qual = 100;
1338 else if (sig_qual < 1)
1339 sig_qual = 0;
1340
1341 return sig_qual;
1342}
1343
1344/** 1302/**
1345 * iwl3945_rx_handle - Main entry function for receiving responses from uCode 1303 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1346 * 1304 *
@@ -1688,7 +1646,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1688 } 1646 }
1689 1647
1690#ifdef CONFIG_IWLWIFI_DEBUG 1648#ifdef CONFIG_IWLWIFI_DEBUG
1691 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)) 1649 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1692 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) 1650 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1693 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; 1651 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1694#else 1652#else
@@ -3867,7 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3867 priv->retry_rate = 1; 3825 priv->retry_rate = 1;
3868 priv->ibss_beacon = NULL; 3826 priv->ibss_beacon = NULL;
3869 3827
3870 spin_lock_init(&priv->lock);
3871 spin_lock_init(&priv->sta_lock); 3828 spin_lock_init(&priv->sta_lock);
3872 spin_lock_init(&priv->hcmd_lock); 3829 spin_lock_init(&priv->hcmd_lock);
3873 3830
@@ -3936,9 +3893,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3936 /* Tell mac80211 our characteristics */ 3893 /* Tell mac80211 our characteristics */
3937 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3894 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3938 IEEE80211_HW_NOISE_DBM | 3895 IEEE80211_HW_NOISE_DBM |
3939 IEEE80211_HW_SPECTRUM_MGMT | 3896 IEEE80211_HW_SPECTRUM_MGMT;
3940 IEEE80211_HW_SUPPORTS_PS | 3897
3941 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3898 if (!priv->cfg->broken_powersave)
3899 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3900 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3942 3901
3943 hw->wiphy->interface_modes = 3902 hw->wiphy->interface_modes =
3944 BIT(NL80211_IFTYPE_STATION) | 3903 BIT(NL80211_IFTYPE_STATION) |
@@ -4057,10 +4016,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4057 * PCI Tx retries from interfering with C3 CPU state */ 4016 * PCI Tx retries from interfering with C3 CPU state */
4058 pci_write_config_byte(pdev, 0x41, 0x00); 4017 pci_write_config_byte(pdev, 0x41, 0x00);
4059 4018
4060 /* this spin lock will be used in apm_ops.init and EEPROM access 4019 /* these spin locks will be used in apm_ops.init and EEPROM access
4061 * we should init now 4020 * we should init now
4062 */ 4021 */
4063 spin_lock_init(&priv->reg_lock); 4022 spin_lock_init(&priv->reg_lock);
4023 spin_lock_init(&priv->lock);
4064 4024
4065 /*********************** 4025 /***********************
4066 * 4. Read EEPROM 4026 * 4. Read EEPROM
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 777584d76a88..1e41ad0fcad5 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm,
973 973
974 memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); 974 memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
975 975
976 update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
977 update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
978 sizeof(struct iwm_umac_wifi_if));
979
976 update.command = cpu_to_le32(command); 980 update.command = cpu_to_le32(command);
977 if (pmksa->bssid) 981 if (pmksa->bssid)
978 memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); 982 memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 06af0552cd75..3dfd9f0e9003 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx {
463#define IWM_CMD_PMKID_FLUSH 3 463#define IWM_CMD_PMKID_FLUSH 3
464 464
465struct iwm_umac_pmkid_update { 465struct iwm_umac_pmkid_update {
466 struct iwm_umac_wifi_if hdr;
466 __le32 command; 467 __le32 command;
467 u8 bssid[ETH_ALEN]; 468 u8 bssid[ETH_ALEN];
468 __le16 reserved; 469 __le16 reserved;
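Note: the commands.c and commands.h hunks above prepend an interface header to the PMKID update and advertise the payload length as the full structure minus that header. A toy layout showing the size bookkeeping; the field names and sizes here are stand-ins, not the driver's real definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wifi_if_hdr {                     /* stand-in for struct iwm_umac_wifi_if */
	uint8_t  oid;
	uint8_t  flags;
	uint16_t buf_size;               /* payload length that follows the header */
};

struct pmkid_update {                    /* stand-in for struct iwm_umac_pmkid_update */
	struct wifi_if_hdr hdr;
	uint32_t command;
	uint8_t  bssid[6];
	uint16_t reserved;
	uint8_t  pmkid[16];
};

int main(void)
{
	struct pmkid_update update;

	memset(&update, 0, sizeof(update));
	/* mirror the fix: count only the bytes that follow the interface header */
	update.hdr.buf_size = (uint16_t)(sizeof(update) - sizeof(update.hdr));
	printf("payload bytes advertised: %u\n", update.hdr.buf_size);
	return 0;
}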
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 5a26bb05a33a..842811142bef 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
268 268
269 struct sk_buff_head rx_list; 269 struct sk_buff_head rx_list;
270 struct list_head rx_tickets; 270 struct list_head rx_tickets;
271 struct list_head rx_packets[IWM_RX_ID_HASH]; 271 struct list_head rx_packets[IWM_RX_ID_HASH + 1];
272 struct workqueue_struct *rx_wq; 272 struct workqueue_struct *rx_wq;
273 struct work_struct rx_worker; 273 struct work_struct rx_worker;
274 274
@@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
349int iwm_down(struct iwm_priv *iwm); 349int iwm_down(struct iwm_priv *iwm);
350 350
351/* TX API */ 351/* TX API */
352u16 iwm_tid_to_queue(u16 tid); 352int iwm_tid_to_queue(u16 tid);
353void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages); 353void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
354void iwm_tx_worker(struct work_struct *work); 354void iwm_tx_worker(struct work_struct *work);
355int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 355int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index e4f0f8705f65..c4c0d23c63ec 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -76,7 +76,7 @@ static int iwm_stop(struct net_device *ndev)
76 */ 76 */
77static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; 77static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
78 78
79u16 iwm_tid_to_queue(u16 tid) 79int iwm_tid_to_queue(u16 tid)
80{ 80{
81 if (tid > IWM_UMAC_TID_NR - 2) 81 if (tid > IWM_UMAC_TID_NR - 2)
82 return -EINVAL; 82 return -EINVAL;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 1c57c1f72cba..f727b4a83196 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
794 } 794 }
795 795
796 bss->bss = kzalloc(bss_len, GFP_KERNEL); 796 bss->bss = kzalloc(bss_len, GFP_KERNEL);
797 if (!bss) { 797 if (!bss->bss) {
798 kfree(bss); 798 kfree(bss);
799 IWM_ERR(iwm, "Couldn't allocate bss\n"); 799 IWM_ERR(iwm, "Couldn't allocate bss\n");
800 return -ENOMEM; 800 return -ENOMEM;
@@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
1126 1126
1127 if (!stop) { 1127 if (!stop) {
1128 struct iwm_tx_queue *txq; 1128 struct iwm_tx_queue *txq;
1129 u16 queue = iwm_tid_to_queue(bit); 1129 int queue = iwm_tid_to_queue(bit);
1130 1130
1131 if (queue < 0) 1131 if (queue < 0)
1132 continue; 1132 continue;
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 2f91c9b808af..92b7a357a5e4 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -2,6 +2,7 @@
2#include <linux/delay.h> 2#include <linux/delay.h>
3#include <linux/etherdevice.h> 3#include <linux/etherdevice.h>
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/if_ether.h>
5#include <linux/if_arp.h> 6#include <linux/if_arp.h>
6#include <linux/kthread.h> 7#include <linux/kthread.h>
7#include <linux/kfifo.h> 8#include <linux/kfifo.h>
@@ -351,8 +352,7 @@ int lbs_add_mesh(struct lbs_private *priv)
351 352
352 mesh_dev->netdev_ops = &mesh_netdev_ops; 353 mesh_dev->netdev_ops = &mesh_netdev_ops;
353 mesh_dev->ethtool_ops = &lbs_ethtool_ops; 354 mesh_dev->ethtool_ops = &lbs_ethtool_ops;
354 memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, 355 memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
355 sizeof(priv->dev->dev_addr));
356 356
357 SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); 357 SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
358 358
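Note: the ETH_ALEN change above matters because sizeof(priv->dev->dev_addr) measures the dev_addr member itself, which in kernels of this vintage is a pointer, not the 6-byte MAC address it refers to. A small demonstration under that assumption (the struct here is a stand-in):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct net_device_like {
	unsigned char *dev_addr;         /* points at the hardware address */
};

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char copy[ETH_ALEN] = { 0 };
	struct net_device_like dev = { .dev_addr = mac };

	/* on a 64-bit build this prints 8, not 6 */
	printf("sizeof(dev.dev_addr) = %zu\n", sizeof(dev.dev_addr));

	memcpy(copy, dev.dev_addr, ETH_ALEN);   /* the corrected form */
	printf("copied %d MAC bytes\n", ETH_ALEN);
	return 0;
}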
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index c6a6c042b82f..b0b1c7841500 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -567,11 +567,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
567 chan_count = lbs_scan_create_channel_list(priv, chan_list); 567 chan_count = lbs_scan_create_channel_list(priv, chan_list);
568 568
569 netif_stop_queue(priv->dev); 569 netif_stop_queue(priv->dev);
570 netif_carrier_off(priv->dev); 570 if (priv->mesh_dev)
571 if (priv->mesh_dev) {
572 netif_stop_queue(priv->mesh_dev); 571 netif_stop_queue(priv->mesh_dev);
573 netif_carrier_off(priv->mesh_dev);
574 }
575 572
576 /* Prepare to continue an interrupted scan */ 573 /* Prepare to continue an interrupted scan */
577 lbs_deb_scan("chan_count %d, scan_channel %d\n", 574 lbs_deb_scan("chan_count %d, scan_channel %d\n",
@@ -635,16 +632,13 @@ out2:
635 priv->scan_channel = 0; 632 priv->scan_channel = 0;
636 633
637out: 634out:
638 if (priv->connect_status == LBS_CONNECTED) { 635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
639 netif_carrier_on(priv->dev); 636 netif_wake_queue(priv->dev);
640 if (!priv->tx_pending_len) 637
641 netif_wake_queue(priv->dev); 638 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
642 } 639 !priv->tx_pending_len)
643 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) { 640 netif_wake_queue(priv->mesh_dev);
644 netif_carrier_on(priv->mesh_dev); 641
645 if (!priv->tx_pending_len)
646 netif_wake_queue(priv->mesh_dev);
647 }
648 kfree(chan_list); 642 kfree(chan_list);
649 643
650 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); 644 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index a8eb9e1fcf36..4b1aab593a84 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -2025,10 +2025,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
2025 if (priv->connect_status == LBS_CONNECTED) { 2025 if (priv->connect_status == LBS_CONNECTED) {
2026 memcpy(extra, priv->curbssparams.ssid, 2026 memcpy(extra, priv->curbssparams.ssid,
2027 priv->curbssparams.ssid_len); 2027 priv->curbssparams.ssid_len);
2028 extra[priv->curbssparams.ssid_len] = '\0';
2029 } else { 2028 } else {
2030 memset(extra, 0, 32); 2029 memset(extra, 0, 32);
2031 extra[priv->curbssparams.ssid_len] = '\0';
2032 } 2030 }
2033 /* 2031 /*
2034 * If none, we may want to get the one that was set 2032 * If none, we may want to get the one that was set
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 019431d2f8a9..26a1abd5bb03 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -495,7 +495,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
495 stats.band = IEEE80211_BAND_2GHZ; 495 stats.band = IEEE80211_BAND_2GHZ;
496 stats.signal = prxpd->snr; 496 stats.signal = prxpd->snr;
497 stats.noise = prxpd->nf; 497 stats.noise = prxpd->nf;
498 stats.qual = prxpd->snr - prxpd->nf;
499 /* Marvell rate index has a hole at value 4 */ 498 /* Marvell rate index has a hole at value 4 */
500 if (prxpd->rx_rate > 4) 499 if (prxpd->rx_rate > 4)
501 --prxpd->rx_rate; 500 --prxpd->rx_rate;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59d49159cf2a..59f92105b0c2 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -3157,8 +3157,10 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3157 /* Clear unsupported feature flags */ 3157 /* Clear unsupported feature flags */
3158 *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC; 3158 *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
3159 3159
3160 if (mwl8k_fw_lock(hw)) 3160 if (mwl8k_fw_lock(hw)) {
3161 kfree(cmd);
3161 return; 3162 return;
3163 }
3162 3164
3163 if (priv->sniffer_enabled) { 3165 if (priv->sniffer_enabled) {
3164 mwl8k_enable_sniffer(hw, 0); 3166 mwl8k_enable_sniffer(hw, 0);
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 7698fdd6a3a2..31ca241f7753 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -23,7 +23,7 @@
23#define MAX_RID_LEN 1024 23#define MAX_RID_LEN 1024
24 24
25/* Helper routine to record keys 25/* Helper routine to record keys
26 * Do not call from interrupt context */ 26 * It is called under orinoco_lock so it may not sleep */
27static int orinoco_set_key(struct orinoco_private *priv, int index, 27static int orinoco_set_key(struct orinoco_private *priv, int index,
28 enum orinoco_alg alg, const u8 *key, int key_len, 28 enum orinoco_alg alg, const u8 *key, int key_len,
29 const u8 *seq, int seq_len) 29 const u8 *seq, int seq_len)
@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
32 kzfree(priv->keys[index].seq); 32 kzfree(priv->keys[index].seq);
33 33
34 if (key_len) { 34 if (key_len) {
35 priv->keys[index].key = kzalloc(key_len, GFP_KERNEL); 35 priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
36 if (!priv->keys[index].key) 36 if (!priv->keys[index].key)
37 goto nomem; 37 goto nomem;
38 } else 38 } else
39 priv->keys[index].key = NULL; 39 priv->keys[index].key = NULL;
40 40
41 if (seq_len) { 41 if (seq_len) {
42 priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL); 42 priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
43 if (!priv->keys[index].seq) 43 if (!priv->keys[index].seq)
44 goto free_key; 44 goto free_key;
45 } else 45 } else
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a15962a19b2a..a72f7c2577de 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -197,6 +197,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
197 i %= ring_limit; 197 i %= ring_limit;
198 continue; 198 continue;
199 } 199 }
200
201 if (unlikely(len > priv->common.rx_mtu)) {
202 if (net_ratelimit())
203 dev_err(&priv->pdev->dev, "rx'd frame size "
204 "exceeds length threshold.\n");
205
206 len = priv->common.rx_mtu;
207 }
200 skb_put(skb, len); 208 skb_put(skb, len);
201 209
202 if (p54_rx(dev, skb)) { 210 if (p54_rx(dev, skb)) {
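Note: the added check above caps the length taken from the ring descriptor at rx_mtu before skb_put(), presumably because the receive buffer was sized for at most rx_mtu bytes and an oversized hardware value would otherwise run past it. A toy model of the clamp:

#include <stdio.h>

#define RX_MTU 2048u                     /* stand-in for priv->common.rx_mtu */

static unsigned int clamp_rx_len(unsigned int len)
{
	if (len > RX_MTU) {
		fprintf(stderr, "rx'd frame size exceeds length threshold\n");
		len = RX_MTU;
	}
	return len;
}

int main(void)
{
	printf("%u\n", clamp_rx_len(1500));    /* passed through unchanged */
	printf("%u\n", clamp_rx_len(65535));   /* clamped to 2048 */
	return 0;
}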
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index c5fe867665e6..1a7eae357fef 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1323,7 +1323,7 @@
1323#define PAIRWISE_KEY_ENTRY(__idx) \ 1323#define PAIRWISE_KEY_ENTRY(__idx) \
1324 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) 1324 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
1325#define MAC_IVEIV_ENTRY(__idx) \ 1325#define MAC_IVEIV_ENTRY(__idx) \
1326 ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) ) 1326 ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
1327#define MAC_WCID_ATTR_ENTRY(__idx) \ 1327#define MAC_WCID_ATTR_ENTRY(__idx) \
1328 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) ) 1328 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
1329#define SHARED_KEY_ENTRY(__idx) \ 1329#define SHARED_KEY_ENTRY(__idx) \
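Note: the one-character fix above replaces '&' with '*' in MAC_IVEIV_ENTRY(); the masked form added (idx & sizeof(entry)) to the table base, so nearly every index aliased the first entry or two instead of scaling by the entry size. A quick demonstration with stand-in values for the base address and an 8-byte entry:

#include <stdio.h>

#define MAC_IVEIV_TABLE_BASE 0x6000u             /* stand-in value */
struct mac_iveiv_entry { unsigned char iv[8]; }; /* stand-in layout */

#define ENTRY_BROKEN(i) (MAC_IVEIV_TABLE_BASE + ((i) & sizeof(struct mac_iveiv_entry)))
#define ENTRY_FIXED(i)  (MAC_IVEIV_TABLE_BASE + ((i) * sizeof(struct mac_iveiv_entry)))

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		printf("idx %u: broken 0x%04x, fixed 0x%04x\n", i,
		       (unsigned int)ENTRY_BROKEN(i), (unsigned int)ENTRY_FIXED(i));
	return 0;
}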
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index eb1e1d00bec3..9deae41cb784 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
37#include <linux/module.h> 37#include <linux/module.h>
38 38
39#include "rt2x00.h" 39#include "rt2x00.h"
40#ifdef CONFIG_RT2800USB 40#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
41#include "rt2x00usb.h" 41#include "rt2x00usb.h"
42#endif 42#endif
43#include "rt2800lib.h" 43#include "rt2800lib.h"
@@ -340,7 +340,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
340 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off); 340 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
341 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3); 341 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
342 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3); 342 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
343 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12); 343 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
344 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3); 344 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
345 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1); 345 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
346 rt2800_register_write(led->rt2x00dev, LED_CFG, reg); 346 rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
@@ -1121,7 +1121,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1121 1121
1122 if (rt2x00_intf_is_usb(rt2x00dev)) { 1122 if (rt2x00_intf_is_usb(rt2x00dev)) {
1123 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); 1123 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1124#ifdef CONFIG_RT2800USB 1124#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
1125 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 1125 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1126 USB_MODE_RESET, REGISTER_TIMEOUT); 1126 USB_MODE_RESET, REGISTER_TIMEOUT);
1127#endif 1127#endif
@@ -2022,6 +2022,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2022 u16 eeprom; 2022 u16 eeprom;
2023 2023
2024 /* 2024 /*
2025 * Disable powersaving as default on PCI devices.
2026 */
2027 if (rt2x00_intf_is_pci(rt2x00dev))
2028 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2029
2030 /*
2025 * Initialize all hw fields. 2031 * Initialize all hw fields.
2026 */ 2032 */
2027 rt2x00dev->hw->flags = 2033 rt2x00dev->hw->flags =
@@ -2074,8 +2080,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2074 IEEE80211_HT_CAP_SGI_20 | 2080 IEEE80211_HT_CAP_SGI_20 |
2075 IEEE80211_HT_CAP_SGI_40 | 2081 IEEE80211_HT_CAP_SGI_40 |
2076 IEEE80211_HT_CAP_TX_STBC | 2082 IEEE80211_HT_CAP_TX_STBC |
2077 IEEE80211_HT_CAP_RX_STBC | 2083 IEEE80211_HT_CAP_RX_STBC;
2078 IEEE80211_HT_CAP_PSMP_SUPPORT;
2079 spec->ht.ampdu_factor = 3; 2084 spec->ht.ampdu_factor = 3;
2080 spec->ht.ampdu_density = 4; 2085 spec->ht.ampdu_density = 4;
2081 spec->ht.mcs.tx_params = 2086 spec->ht.mcs.tx_params =
@@ -2140,8 +2145,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
2140 rt2800_register_multiread(rt2x00dev, offset, 2145 rt2800_register_multiread(rt2x00dev, offset,
2141 &iveiv_entry, sizeof(iveiv_entry)); 2146 &iveiv_entry, sizeof(iveiv_entry));
2142 2147
2143 memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16)); 2148 memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
2144 memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32)); 2149 memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
2145} 2150}
2146 2151
2147static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 2152static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
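Note: the rt2800_get_tkip_seq() hunk above fixes two things at once: the copy direction (hardware IV/EIV entry into the caller's iv16/iv32, not the reverse) and the length (sizeof(*iv16)/sizeof(*iv32), the pointed-to objects, rather than sizeof of the pointer parameters). A sketch of the corrected shape, with a stand-in entry layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct iveiv_entry_like { uint8_t iv[8]; };      /* stand-in layout */

static void get_tkip_seq(const struct iveiv_entry_like *entry,
			 uint16_t *iv16, uint32_t *iv32)
{
	memcpy(iv16, &entry->iv[0], sizeof(*iv16));  /* 2 bytes out to the caller */
	memcpy(iv32, &entry->iv[4], sizeof(*iv32));  /* 4 bytes out to the caller */
}

int main(void)
{
	struct iveiv_entry_like e = { { 0x34, 0x12, 0, 0, 0x78, 0x56, 0x34, 0x12 } };
	uint16_t iv16;
	uint32_t iv32;

	get_tkip_seq(&e, &iv16, &iv32);
	/* values shown for a little-endian host */
	printf("iv16=0x%04x iv32=0x%08x\n", iv16, iv32);
	return 0;
}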
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index af85d18cdbe7..ab95346cf6a3 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
922 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, 922 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
923 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, 923 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
924 { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, 924 { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
925 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
925 /* Logitec */ 926 /* Logitec */
926 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, 927 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
927 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, 928 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 4d841c07c970..dcfc8c25d1a7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -113,6 +113,12 @@
113 ( ((unsigned long)((__skb)->data + (__header))) & 3 ) 113 ( ((unsigned long)((__skb)->data + (__header))) & 3 )
114 114
115/* 115/*
116 * Constants for extra TX headroom for alignment purposes.
117 */
118#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */
119#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */
120
121/*
116 * Standard timing and size defines. 122 * Standard timing and size defines.
117 * These values should follow the ieee80211 specifications. 123 * These values should follow the ieee80211 specifications.
118 */ 124 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 06c43ca39bf8..265e66dba552 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -686,7 +686,17 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
686 /* 686 /*
687 * Initialize extra TX headroom required. 687 * Initialize extra TX headroom required.
688 */ 688 */
689 rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom; 689 rt2x00dev->hw->extra_tx_headroom =
690 max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
691 rt2x00dev->ops->extra_tx_headroom);
692
693 /*
694 * Take TX headroom required for alignment into account.
695 */
696 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
697 rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
698 else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
699 rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
690 700
691 /* 701 /*
692 * Register HW. 702 * Register HW.
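Note: the rt2x00dev.c hunk above makes the headroom advertised to mac80211 the larger of IEEE80211_TX_STATUS_HEADROOM and the driver's own descriptor headroom, plus L2-pad or DMA-alignment bytes, while the rt2x00queue.c hunks that follow keep mapping only ops->extra_tx_headroom around the frame for DMA. A sketch of the arithmetic (the TX status headroom value is a stand-in):

#include <stdio.h>

#define IEEE80211_TX_STATUS_HEADROOM 13u   /* stand-in value */
#define RT2X00_ALIGN_SIZE 4u               /* whole frame needs alignment */
#define RT2X00_L2PAD_SIZE 8u               /* header and payload aligned separately */

static unsigned int hw_extra_tx_headroom(unsigned int drv_headroom,
					 int needs_l2pad, int needs_dma_align)
{
	unsigned int headroom = drv_headroom;

	if (headroom < IEEE80211_TX_STATUS_HEADROOM)
		headroom = IEEE80211_TX_STATUS_HEADROOM;
	if (needs_l2pad)
		headroom += RT2X00_L2PAD_SIZE;
	else if (needs_dma_align)
		headroom += RT2X00_ALIGN_SIZE;
	return headroom;
}

int main(void)
{
	/* e.g. a 4-byte TX descriptor on hardware that wants L2 padding */
	printf("advertised headroom: %u\n", hw_extra_tx_headroom(4, 1, 0));
	return 0;
}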
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 239afc7a9c0b..9915a09141ef 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -104,7 +104,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
104 * is also mapped to the DMA so it can be used for transfering 104 * is also mapped to the DMA so it can be used for transfering
105 * additional descriptor information to the hardware. 105 * additional descriptor information to the hardware.
106 */ 106 */
107 skb_push(skb, rt2x00dev->hw->extra_tx_headroom); 107 skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
108 108
109 skbdesc->skb_dma = 109 skbdesc->skb_dma =
110 dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE); 110 dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -112,7 +112,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
112 /* 112 /*
113 * Restore data pointer to original location again. 113 * Restore data pointer to original location again.
114 */ 114 */
115 skb_pull(skb, rt2x00dev->hw->extra_tx_headroom); 115 skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
116 116
117 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; 117 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
118} 118}
@@ -134,7 +134,7 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
134 * by the driver, but it was actually mapped to DMA. 134 * by the driver, but it was actually mapped to DMA.
135 */ 135 */
136 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, 136 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
137 skb->len + rt2x00dev->hw->extra_tx_headroom, 137 skb->len + rt2x00dev->ops->extra_tx_headroom,
138 DMA_TO_DEVICE); 138 DMA_TO_DEVICE);
139 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; 139 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
140 } 140 }
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 687e17dc2e9f..0ca589306d71 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2539,6 +2539,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2539 unsigned int i; 2539 unsigned int i;
2540 2540
2541 /* 2541 /*
2542 * Disable powersaving as default.
2543 */
2544 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2545
2546 /*
2542 * Initialize all hw fields. 2547 * Initialize all hw fields.
2543 */ 2548 */
2544 rt2x00dev->hw->flags = 2549 rt2x00dev->hw->flags =
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index a1a3dd15c664..8a40a1439984 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -132,7 +132,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
132 132
133 rx_status.antenna = (flags2 >> 15) & 1; 133 rx_status.antenna = (flags2 >> 15) & 1;
134 /* TODO: improve signal/rssi reporting */ 134 /* TODO: improve signal/rssi reporting */
135 rx_status.qual = flags2 & 0xFF;
136 rx_status.signal = (flags2 >> 8) & 0x7F; 135 rx_status.signal = (flags2 >> 8) & 0x7F;
137 /* XXX: is this correct? */ 136 /* XXX: is this correct? */
138 rx_status.rate_idx = (flags >> 20) & 0xF; 137 rx_status.rate_idx = (flags >> 20) & 0xF;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe4..7ba3052b0708 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
65 /* Sitecom */ 65 /* Sitecom */
66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, 67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
68 {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
68 /* Sphairon Access Systems GmbH */ 69 /* Sphairon Access Systems GmbH */
69 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, 70 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
70 /* Dick Smith Electronics */ 71 /* Dick Smith Electronics */
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 2e733e7bdfd4..28a808674080 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -256,7 +256,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
256 } 256 }
257 } 257 }
258 258
259 if (loop >= INIT_LOOP) { 259 if (loop > INIT_LOOP) {
260 wl1251_error("timeout waiting for the hardware to " 260 wl1251_error("timeout waiting for the hardware to "
261 "complete initialization"); 261 "complete initialization");
262 return -EIO; 262 return -EIO;
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 886a9bc39cc1..c3385b3d246c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -777,7 +777,7 @@ out:
777 return ret; 777 return ret;
778} 778}
779 779
780static int wl1271_build_basic_rates(char *rates, u8 band) 780static int wl1271_build_basic_rates(u8 *rates, u8 band)
781{ 781{
782 u8 index = 0; 782 u8 index = 0;
783 783
@@ -804,7 +804,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band)
804 return index; 804 return index;
805} 805}
806 806
807static int wl1271_build_extended_rates(char *rates, u8 band) 807static int wl1271_build_extended_rates(u8 *rates, u8 band)
808{ 808{
809 u8 index = 0; 809 u8 index = 0;
810 810
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index dfa1b9bc22c8..7ca95c414fa8 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1325,151 +1325,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
1325 return r; 1325 return r;
1326} 1326}
1327 1327
1328static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size)
1329{
1330 static const u16 constants[] = {
1331 715, 655, 585, 540, 470, 410, 360, 315,
1332 270, 235, 205, 175, 150, 125, 105, 85,
1333 65, 50, 40, 25, 15
1334 };
1335
1336 int i;
1337 u32 x;
1338
1339 /* It seems that their quality parameter is somehow per signal
1340 * and is now transferred per bit.
1341 */
1342 switch (zd_rate) {
1343 case ZD_OFDM_RATE_6M:
1344 case ZD_OFDM_RATE_12M:
1345 case ZD_OFDM_RATE_24M:
1346 size *= 2;
1347 break;
1348 case ZD_OFDM_RATE_9M:
1349 case ZD_OFDM_RATE_18M:
1350 case ZD_OFDM_RATE_36M:
1351 case ZD_OFDM_RATE_54M:
1352 size *= 4;
1353 size /= 3;
1354 break;
1355 case ZD_OFDM_RATE_48M:
1356 size *= 3;
1357 size /= 2;
1358 break;
1359 default:
1360 return -EINVAL;
1361 }
1362
1363 x = (10000 * status_quality)/size;
1364 for (i = 0; i < ARRAY_SIZE(constants); i++) {
1365 if (x > constants[i])
1366 break;
1367 }
1368
1369 switch (zd_rate) {
1370 case ZD_OFDM_RATE_6M:
1371 case ZD_OFDM_RATE_9M:
1372 i += 3;
1373 break;
1374 case ZD_OFDM_RATE_12M:
1375 case ZD_OFDM_RATE_18M:
1376 i += 5;
1377 break;
1378 case ZD_OFDM_RATE_24M:
1379 case ZD_OFDM_RATE_36M:
1380 i += 9;
1381 break;
1382 case ZD_OFDM_RATE_48M:
1383 case ZD_OFDM_RATE_54M:
1384 i += 15;
1385 break;
1386 default:
1387 return -EINVAL;
1388 }
1389
1390 return i;
1391}
1392
1393static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size)
1394{
1395 int r;
1396
1397 r = ofdm_qual_db(status_quality, zd_rate, size);
1398 ZD_ASSERT(r >= 0);
1399 if (r < 0)
1400 r = 0;
1401
1402 r = (r * 100)/29;
1403 return r <= 100 ? r : 100;
1404}
1405
1406static unsigned int log10times100(unsigned int x)
1407{
1408 static const u8 log10[] = {
1409 0,
1410 0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
1411 104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
1412 132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
1413 149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
1414 161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
1415 170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
1416 178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
1417 185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
1418 190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
1419 195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
1420 200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
1421 204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
1422 208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
1423 211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
1424 214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
1425 217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
1426 220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
1427 223, 223, 223, 224, 224, 224, 224,
1428 };
1429
1430 return x < ARRAY_SIZE(log10) ? log10[x] : 225;
1431}
1432
1433enum {
1434 MAX_CCK_EVM_DB = 45,
1435};
1436
1437static int cck_evm_db(u8 status_quality)
1438{
1439 return (20 * log10times100(status_quality)) / 100;
1440}
1441
1442static int cck_snr_db(u8 status_quality)
1443{
1444 int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
1445 ZD_ASSERT(r >= 0);
1446 return r;
1447}
1448
1449static int cck_qual_percent(u8 status_quality)
1450{
1451 int r;
1452
1453 r = cck_snr_db(status_quality);
1454 r = (100*r)/17;
1455 return r <= 100 ? r : 100;
1456}
1457
1458static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame) 1328static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
1459{ 1329{
1460 return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame); 1330 return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
1461} 1331}
1462 1332
1463u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
1464 const struct rx_status *status)
1465{
1466 return (status->frame_status&ZD_RX_OFDM) ?
1467 ofdm_qual_percent(status->signal_quality_ofdm,
1468 zd_rate_from_ofdm_plcp_header(rx_frame),
1469 size) :
1470 cck_qual_percent(status->signal_quality_cck);
1471}
1472
1473/** 1333/**
1474 * zd_rx_rate - report zd-rate 1334 * zd_rx_rate - report zd-rate
1475 * @rx_frame - received frame 1335 * @rx_frame - received frame
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 9fd8f3508d66..f8bbf7d302ae 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
929 929
930struct rx_status; 930struct rx_status;
931 931
932u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
933 const struct rx_status *status);
934
935u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status); 932u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
936 933
937struct zd_mc_hash { 934struct zd_mc_hash {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index cf51e8f8174b..f14deb0c8514 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -828,9 +828,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
828 stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq; 828 stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
829 stats.band = IEEE80211_BAND_2GHZ; 829 stats.band = IEEE80211_BAND_2GHZ;
830 stats.signal = status->signal_strength; 830 stats.signal = status->signal_strength;
831 stats.qual = zd_rx_qual_percent(buffer,
832 length - sizeof(struct rx_status),
833 status);
834 831
835 rate = zd_rx_rate(buffer, status); 832 rate = zd_rx_rate(buffer, status);
836 833
@@ -990,12 +987,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
990 changed_flags &= SUPPORTED_FIF_FLAGS; 987 changed_flags &= SUPPORTED_FIF_FLAGS;
991 *new_flags &= SUPPORTED_FIF_FLAGS; 988 *new_flags &= SUPPORTED_FIF_FLAGS;
992 989
993 /* changed_flags is always populated but this driver 990 /*
994 * doesn't support all FIF flags so its possible we don't 991 * If multicast parameter (as returned by zd_op_prepare_multicast)
995 * need to do anything */ 992 * has changed, no bit in changed_flags is set. To handle this
996 if (!changed_flags) 993 * situation, we do not return if changed_flags is 0. If we do so,
997 return; 994 * we will have some issue with IPv6 which uses multicast for link
998 995 * layer address resolution.
996 */
999 if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)) 997 if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
1000 zd_mc_add_all(&hash); 998 zd_mc_add_all(&hash);
1001 999
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ac19ecd19cfe..72d3e437e190 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
62 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 62 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
63 /* ZD1211B */ 63 /* ZD1211B */
64 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, 64 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, 66 { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
66 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, 67 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
67 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 68 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e952fdab764..cb2fd01eddae 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
720 -ret_val); 720 -ret_val);
721 goto acpiphp_bus_add_out; 721 goto acpiphp_bus_add_out;
722 } 722 }
723 /*
724 * try to start anyway. We could have failed to add
725 * simply because this bus had previously been added
726 * on another add. Don't bother with the return value
727 * we just keep going.
728 */
729 ret_val = acpi_bus_start(device); 723 ret_val = acpi_bus_start(device);
730 724
731acpiphp_bus_add_out: 725acpiphp_bus_add_out:
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index bd588eb8e922..8e210cd76e55 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -121,7 +121,7 @@ struct controller {
121#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 121#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
122#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 122#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
123 123
124/* AMD PCIX bridge registers */ 124/* AMD PCI-X bridge registers */
125#define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C 125#define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C
126#define PCIX_MISCII_OFFSET 0x48 126#define PCIX_MISCII_OFFSET 0x48
127#define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80 127#define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e56f9bed6f2b..417312528ddf 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -305,7 +305,7 @@ struct device_domain_info {
305 int segment; /* PCI domain */ 305 int segment; /* PCI domain */
306 u8 bus; /* PCI bus number */ 306 u8 bus; /* PCI bus number */
307 u8 devfn; /* PCI devfn number */ 307 u8 devfn; /* PCI devfn number */
308 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ 308 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
309 struct intel_iommu *iommu; /* IOMMU used by this device */ 309 struct intel_iommu *iommu; /* IOMMU used by this device */
310 struct dmar_domain *domain; /* pointer to domain */ 310 struct dmar_domain *domain; /* pointer to domain */
311}; 311};
@@ -1604,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1604 return ret; 1604 return ret;
1605 parent = parent->bus->self; 1605 parent = parent->bus->self;
1606 } 1606 }
1607 if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */ 1607 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1608 return domain_context_mapping_one(domain, 1608 return domain_context_mapping_one(domain,
1609 pci_domain_nr(tmp->subordinate), 1609 pci_domain_nr(tmp->subordinate),
1610 tmp->subordinate->number, 0, 1610 tmp->subordinate->number, 0,
@@ -3325,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3325 parent->devfn); 3325 parent->devfn);
3326 parent = parent->bus->self; 3326 parent = parent->bus->self;
3327 } 3327 }
3328 if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */ 3328 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3329 iommu_detach_dev(iommu, 3329 iommu_detach_dev(iommu,
3330 tmp->subordinate->number, 0); 3330 tmp->subordinate->number, 0);
3331 else /* this is a legacy PCI bridge */ 3331 else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 8b65a489581b..95b849130ad4 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
528 528
529 bridge = pci_find_upstream_pcie_bridge(dev); 529 bridge = pci_find_upstream_pcie_bridge(dev);
530 if (bridge) { 530 if (bridge) {
531 if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */ 531 if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
532 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, 532 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
533 (bridge->bus->number << 8) | dev->bus->number); 533 (bridge->bus->number << 8) | dev->bus->number);
534 else /* this is a legacy PCI bridge */ 534 else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index cc617ddd33d0..7e2829538a4c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -112,11 +112,7 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
112static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) 112static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
113{ 113{
114 while (bus->parent) { 114 while (bus->parent) {
115 struct pci_dev *bridge = bus->self; 115 if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
116 int ret;
117
118 ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
119 if (!ret || pci_is_pcie(bridge))
120 return; 116 return;
121 bus = bus->parent; 117 bus = bus->parent;
122 } 118 }
@@ -131,9 +127,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
131 if (acpi_pci_can_wakeup(dev)) 127 if (acpi_pci_can_wakeup(dev))
132 return acpi_pm_device_sleep_wake(&dev->dev, enable); 128 return acpi_pm_device_sleep_wake(&dev->dev, enable);
133 129
134 if (!pci_is_pcie(dev)) 130 acpi_pci_propagate_wakeup_enable(dev->bus, enable);
135 acpi_pci_propagate_wakeup_enable(dev->bus, enable);
136
137 return 0; 131 return 0;
138} 132}
139 133
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c5df94e86678..807224ec8351 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -75,7 +75,8 @@ static ssize_t local_cpus_show(struct device *dev,
75 int len; 75 int len;
76 76
77#ifdef CONFIG_NUMA 77#ifdef CONFIG_NUMA
78 mask = cpumask_of_node(dev_to_node(dev)); 78 mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
79 cpumask_of_node(dev_to_node(dev));
79#else 80#else
80 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); 81 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
81#endif 82#endif
@@ -93,7 +94,8 @@ static ssize_t local_cpulist_show(struct device *dev,
93 int len; 94 int len;
94 95
95#ifdef CONFIG_NUMA 96#ifdef CONFIG_NUMA
96 mask = cpumask_of_node(dev_to_node(dev)); 97 mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
98 cpumask_of_node(dev_to_node(dev));
97#else 99#else
98 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); 100 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
99#endif 101#endif
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0bc27e059019..315fea47e784 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -29,7 +29,17 @@ const char *pci_power_names[] = {
29}; 29};
30EXPORT_SYMBOL_GPL(pci_power_names); 30EXPORT_SYMBOL_GPL(pci_power_names);
31 31
32unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 32unsigned int pci_pm_d3_delay;
33
34static void pci_dev_d3_sleep(struct pci_dev *dev)
35{
36 unsigned int delay = dev->d3_delay;
37
38 if (delay < pci_pm_d3_delay)
39 delay = pci_pm_d3_delay;
40
41 msleep(delay);
42}
33 43
34#ifdef CONFIG_PCI_DOMAINS 44#ifdef CONFIG_PCI_DOMAINS
35int pci_domains_supported = 1; 45int pci_domains_supported = 1;
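The new pci_dev_d3_sleep() helper waits for the per-device dev->d3_delay (initialised to PCI_PM_D3_WAIT in pci_pm_init() further down in this file) and only falls back to the global pci_pm_d3_delay when that is larger. A minimal sketch of how a device known to need a longer D3 transition could make use of the new field (the vendor/device IDs and the 120 ms value below are purely illustrative, not part of this patch):

static void __devinit quirk_slow_d3_delay(struct pci_dev *dev)
{
	/* illustrative only: allow 120 ms instead of the default
	 * PCI_PM_D3_WAIT before the device is trusted after D3hot */
	dev->d3_delay = 120;
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_slow_d3_delay);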
@@ -522,7 +532,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
522 /* Mandatory power management transition delays */ 532 /* Mandatory power management transition delays */
523 /* see PCI PM 1.1 5.6.1 table 18 */ 533 /* see PCI PM 1.1 5.6.1 table 18 */
524 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 534 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
525 msleep(pci_pm_d3_delay); 535 pci_dev_d3_sleep(dev);
526 else if (state == PCI_D2 || dev->current_state == PCI_D2) 536 else if (state == PCI_D2 || dev->current_state == PCI_D2)
527 udelay(PCI_PM_D2_DELAY); 537 udelay(PCI_PM_D2_DELAY);
528 538
@@ -1153,11 +1163,11 @@ pci_disable_device(struct pci_dev *dev)
1153 1163
1154/** 1164/**
1155 * pcibios_set_pcie_reset_state - set reset state for device dev 1165 * pcibios_set_pcie_reset_state - set reset state for device dev
1156 * @dev: the PCI-E device reset 1166 * @dev: the PCIe device reset
1157 * @state: Reset state to enter into 1167 * @state: Reset state to enter into
1158 * 1168 *
1159 * 1169 *
1160 * Sets the PCI-E reset state for the device. This is the default 1170 * Sets the PCIe reset state for the device. This is the default
1161 * implementation. Architecture implementations can override this. 1171 * implementation. Architecture implementations can override this.
1162 */ 1172 */
1163int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, 1173int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1168,7 +1178,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1168 1178
1169/** 1179/**
1170 * pci_set_pcie_reset_state - set reset state for device dev 1180 * pci_set_pcie_reset_state - set reset state for device dev
1171 * @dev: the PCI-E device reset 1181 * @dev: the PCIe device reset
1172 * @state: Reset state to enter into 1182 * @state: Reset state to enter into
1173 * 1183 *
1174 * 1184 *
@@ -1409,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev)
1409 } 1419 }
1410 1420
1411 dev->pm_cap = pm; 1421 dev->pm_cap = pm;
1422 dev->d3_delay = PCI_PM_D3_WAIT;
1412 1423
1413 dev->d1_support = false; 1424 dev->d1_support = false;
1414 dev->d2_support = false; 1425 dev->d2_support = false;
@@ -2247,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
2247 csr &= ~PCI_PM_CTRL_STATE_MASK; 2258 csr &= ~PCI_PM_CTRL_STATE_MASK;
2248 csr |= PCI_D3hot; 2259 csr |= PCI_D3hot;
2249 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 2260 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2250 msleep(pci_pm_d3_delay); 2261 pci_dev_d3_sleep(dev);
2251 2262
2252 csr &= ~PCI_PM_CTRL_STATE_MASK; 2263 csr &= ~PCI_PM_CTRL_STATE_MASK;
2253 csr |= PCI_D0; 2264 csr |= PCI_D0;
2254 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 2265 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2255 msleep(pci_pm_d3_delay); 2266 pci_dev_d3_sleep(dev);
2256 2267
2257 return 0; 2268 return 0;
2258} 2269}
@@ -2296,6 +2307,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
2296 down(&dev->dev.sem); 2307 down(&dev->dev.sem);
2297 } 2308 }
2298 2309
2310 rc = pci_dev_specific_reset(dev, probe);
2311 if (rc != -ENOTTY)
2312 goto done;
2313
2299 rc = pcie_flr(dev, probe); 2314 rc = pcie_flr(dev, probe);
2300 if (rc != -ENOTTY) 2315 if (rc != -ENOTTY)
2301 goto done; 2316 goto done;
@@ -2779,6 +2794,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
2779 return 1; 2794 return 1;
2780} 2795}
2781 2796
2797void __weak pci_fixup_cardbus(struct pci_bus *bus)
2798{
2799}
2800EXPORT_SYMBOL(pci_fixup_cardbus);
2801
2782static int __init pci_setup(char *str) 2802static int __init pci_setup(char *str)
2783{ 2803{
2784 while (str) { 2804 while (str) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 33ed8e0aba1e..fbd0e3adbca3 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -313,4 +313,12 @@ static inline int pci_resource_alignment(struct pci_dev *dev,
313 313
314extern void pci_enable_acs(struct pci_dev *dev); 314extern void pci_enable_acs(struct pci_dev *dev);
315 315
316struct pci_dev_reset_methods {
317 u16 vendor;
318 u16 device;
319 int (*reset)(struct pci_dev *dev, int probe);
320};
321
322extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
323
316#endif /* DRIVERS_PCI_H */ 324#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
index b8c925c1f6aa..9142949734f5 100644
--- a/drivers/pci/pcie/aer/Kconfig.debug
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -3,14 +3,14 @@
3# 3#
4 4
5config PCIEAER_INJECT 5config PCIEAER_INJECT
6 tristate "PCIE AER error injector support" 6 tristate "PCIe AER error injector support"
7 depends on PCIEAER 7 depends on PCIEAER
8 default n 8 default n
9 help 9 help
10 This enables PCI Express Root Port Advanced Error Reporting 10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) software error injector. 11 (AER) software error injector.
12 12
13 Debuging PCIE AER code is quite difficult because it is hard 13 Debugging PCIe AER code is quite difficult because it is hard
14 to trigger various real hardware errors. Software based 14 to trigger various real hardware errors. Software based
15 error injection can fake almost all kinds of errors with the 15 error injection can fake almost all kinds of errors with the
16 help of a user space helper tool aer-inject, which can be 16 help of a user space helper tool aer-inject, which can be
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 7fcd5331b14c..223052b73563 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * PCIE AER software error injection support. 2 * PCIe AER software error injection support.
3 * 3 *
 4 * Debuging PCIE AER code is quite difficult because it is hard to 4 * Debugging PCIe AER code is quite difficult because it is hard to
5 * trigger various real hardware errors. Software based error 5 * trigger various real hardware errors. Software based error
6 * injection can fake almost all kinds of errors with the help of a 6 * injection can fake almost all kinds of errors with the help of a
7 * user space helper tool aer-inject, which can be gotten from: 7 * user space helper tool aer-inject, which can be gotten from:
@@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj)
321 unsigned long flags; 321 unsigned long flags;
322 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); 322 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
323 int pos_cap_err, rp_pos_cap_err; 323 int pos_cap_err, rp_pos_cap_err;
324 u32 sever; 324 u32 sever, cor_mask, uncor_mask;
325 int ret = 0; 325 int ret = 0;
326 326
327 dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); 327 dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
@@ -339,6 +339,9 @@ static int aer_inject(struct aer_error_inj *einj)
339 goto out_put; 339 goto out_put;
340 } 340 }
341 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); 341 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
342 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
343 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
344 &uncor_mask);
342 345
343 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); 346 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
344 if (!rp_pos_cap_err) { 347 if (!rp_pos_cap_err) {
@@ -374,6 +377,21 @@ static int aer_inject(struct aer_error_inj *einj)
374 err->header_log2 = einj->header_log2; 377 err->header_log2 = einj->header_log2;
375 err->header_log3 = einj->header_log3; 378 err->header_log3 = einj->header_log3;
376 379
380 if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
381 ret = -EINVAL;
382 printk(KERN_WARNING "The correctable error(s) is masked "
383 "by device\n");
384 spin_unlock_irqrestore(&inject_lock, flags);
385 goto out_put;
386 }
387 if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
388 ret = -EINVAL;
389 printk(KERN_WARNING "The uncorrectable error(s) is masked "
390 "by device\n");
391 spin_unlock_irqrestore(&inject_lock, flags);
392 goto out_put;
393 }
394
377 rperr = __find_aer_error_by_dev(rpdev); 395 rperr = __find_aer_error_by_dev(rpdev);
378 if (!rperr) { 396 if (!rperr) {
379 rperr = rperr_alloc; 397 rperr = rperr_alloc;
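To make the new mask check concrete with illustrative numbers: if the device's PCI_ERR_COR_MASK reads 0x00000040 (the Bad TLP bit) and the injection requests cor_status = 0x00000040 and nothing else, then cor_status & ~cor_mask is 0, meaning every requested error bit is masked by the device, so the injection is now refused with -EINVAL up front rather than appearing to succeed while the device suppresses the error.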
@@ -413,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj)
413 if (ret) 431 if (ret)
414 goto out_put; 432 goto out_put;
415 433
416 if (find_aer_device(rpdev, &edev)) 434 if (find_aer_device(rpdev, &edev)) {
435 if (!get_service_data(edev)) {
436 printk(KERN_WARNING "AER service is not initialized\n");
437 ret = -EINVAL;
438 goto out_put;
439 }
417 aer_irq(-1, edev); 440 aer_irq(-1, edev);
441 }
418 else 442 else
419 ret = -EINVAL; 443 ret = -EINVAL;
420out_put: 444out_put:
@@ -484,5 +508,5 @@ static void __exit aer_inject_exit(void)
484module_init(aer_inject_init); 508module_init(aer_inject_init);
485module_exit(aer_inject_exit); 509module_exit(aer_inject_exit);
486 510
487MODULE_DESCRIPTION("PCIE AER software error injector"); 511MODULE_DESCRIPTION("PCIe AER software error injector");
488MODULE_LICENSE("GPL"); 512MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 97a345927b55..21f215f4daa3 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -155,7 +155,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
155 mutex_init(&rpc->rpc_mutex); 155 mutex_init(&rpc->rpc_mutex);
156 init_waitqueue_head(&rpc->wait_release); 156 init_waitqueue_head(&rpc->wait_release);
157 157
158 /* Use PCIE bus function to store rpc into PCIE device */ 158 /* Use PCIe bus function to store rpc into PCIe device */
159 set_service_data(dev, rpc); 159 set_service_data(dev, rpc);
160 160
161 return rpc; 161 return rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 8edb2f300e8f..04814087658d 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -24,7 +24,7 @@
24 * 24 *
25 * @return: Zero on success. Nonzero otherwise. 25 * @return: Zero on success. Nonzero otherwise.
26 * 26 *
27 * Invoked when PCIE bus loads AER service driver. To avoid conflict with 27 * Invoked when PCIe bus loads AER service driver. To avoid conflict with
28 * BIOS AER support requires BIOS to yield AER control to OS native driver. 28 * BIOS AER support requires BIOS to yield AER control to OS native driver.
29 **/ 29 **/
30int aer_osc_setup(struct pcie_device *pciedev) 30int aer_osc_setup(struct pcie_device *pciedev)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ae672ca80333..c843a799814d 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -587,7 +587,7 @@ static void handle_error_source(struct pcie_device *aerdev,
587 * aer_enable_rootport - enable Root Port's interrupts when receiving messages 587 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
588 * @rpc: pointer to a Root Port data structure 588 * @rpc: pointer to a Root Port data structure
589 * 589 *
590 * Invoked when PCIE bus loads AER service driver. 590 * Invoked when PCIe bus loads AER service driver.
591 */ 591 */
592void aer_enable_rootport(struct aer_rpc *rpc) 592void aer_enable_rootport(struct aer_rpc *rpc)
593{ 593{
@@ -597,7 +597,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
597 u32 reg32; 597 u32 reg32;
598 598
599 pos = pci_pcie_cap(pdev); 599 pos = pci_pcie_cap(pdev);
600 /* Clear PCIE Capability's Device Status */ 600 /* Clear PCIe Capability's Device Status */
601 pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16); 601 pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
602 pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); 602 pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
603 603
@@ -631,7 +631,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
631 * disable_root_aer - disable Root Port's interrupts when receiving messages 631 * disable_root_aer - disable Root Port's interrupts when receiving messages
632 * @rpc: pointer to a Root Port data structure 632 * @rpc: pointer to a Root Port data structure
633 * 633 *
634 * Invoked when PCIE bus unloads AER service driver. 634 * Invoked when PCIe bus unloads AER service driver.
635 */ 635 */
636static void disable_root_aer(struct aer_rpc *rpc) 636static void disable_root_aer(struct aer_rpc *rpc)
637{ 637{
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 44acde72294f..9d3e4c8d0184 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -184,7 +184,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
184 184
185 if (info->status == 0) { 185 if (info->status == 0) {
186 AER_PR(info, dev, 186 AER_PR(info, dev,
187 "PCIE Bus Error: severity=%s, type=Unaccessible, " 187 "PCIe Bus Error: severity=%s, type=Unaccessible, "
188 "id=%04x(Unregistered Agent ID)\n", 188 "id=%04x(Unregistered Agent ID)\n",
189 aer_error_severity_string[info->severity], id); 189 aer_error_severity_string[info->severity], id);
190 } else { 190 } else {
@@ -194,7 +194,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
194 agent = AER_GET_AGENT(info->severity, info->status); 194 agent = AER_GET_AGENT(info->severity, info->status);
195 195
196 AER_PR(info, dev, 196 AER_PR(info, dev,
197 "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n", 197 "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
198 aer_error_severity_string[info->severity], 198 aer_error_severity_string[info->severity],
199 aer_error_layer[layer], id, aer_agent_string[agent]); 199 aer_error_layer[layer], id, aer_agent_string[agent]);
200 200
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5a01fc7fbf05..be53d98fa384 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * File: drivers/pci/pcie/aspm.c 2 * File: drivers/pci/pcie/aspm.c
3 * Enabling PCIE link L0s/L1 state and Clock Power Management 3 * Enabling PCIe link L0s/L1 state and Clock Power Management
4 * 4 *
5 * Copyright (C) 2007 Intel 5 * Copyright (C) 2007 Intel
6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) 6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
@@ -499,7 +499,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
499 int pos; 499 int pos;
500 u32 reg32; 500 u32 reg32;
501 /* 501 /*
502 * Some functions in a slot might not all be PCIE functions, 502 * Some functions in a slot might not all be PCIe functions,
503 * very strange. Disable ASPM for the whole slot 503 * very strange. Disable ASPM for the whole slot
504 */ 504 */
505 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { 505 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 413262eb95b7..b174188ac121 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -27,7 +27,7 @@
27 */ 27 */
28static void release_pcie_device(struct device *dev) 28static void release_pcie_device(struct device *dev)
29{ 29{
30 kfree(to_pcie_device(dev)); 30 kfree(to_pcie_device(dev));
31} 31}
32 32
33/** 33/**
@@ -346,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data)
346{ 346{
347 struct pcie_port_service_driver *service_driver; 347 struct pcie_port_service_driver *service_driver;
348 348
349 if ((dev->bus == &pcie_port_bus_type) && 349 if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
350 (dev->driver)) { 350 service_driver = to_service_driver(dev->driver);
351 service_driver = to_service_driver(dev->driver); 351 if (service_driver->suspend)
352 if (service_driver->suspend) 352 service_driver->suspend(to_pcie_device(dev));
353 service_driver->suspend(to_pcie_device(dev)); 353 }
354 }
355 return 0; 354 return 0;
356} 355}
357 356
@@ -494,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
494 493
495 return driver_register(&new->driver); 494 return driver_register(&new->driver);
496} 495}
496EXPORT_SYMBOL(pcie_port_service_register);
497 497
498/** 498/**
499 * pcie_port_service_unregister - unregister PCI Express port service driver 499 * pcie_port_service_unregister - unregister PCI Express port service driver
@@ -503,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
503{ 503{
504 driver_unregister(&drv->driver); 504 driver_unregister(&drv->driver);
505} 505}
506
507EXPORT_SYMBOL(pcie_port_service_register);
508EXPORT_SYMBOL(pcie_port_service_unregister); 506EXPORT_SYMBOL(pcie_port_service_unregister);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index a49452e2aed9..13c8972886e6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -24,7 +24,7 @@
24 */ 24 */
25#define DRIVER_VERSION "v1.0" 25#define DRIVER_VERSION "v1.0"
26#define DRIVER_AUTHOR "tom.l.nguyen@intel.com" 26#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
27#define DRIVER_DESC "PCIE Port Bus Driver" 27#define DRIVER_DESC "PCIe Port Bus Driver"
28MODULE_AUTHOR(DRIVER_AUTHOR); 28MODULE_AUTHOR(DRIVER_AUTHOR);
29MODULE_DESCRIPTION(DRIVER_DESC); 29MODULE_DESCRIPTION(DRIVER_DESC);
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
@@ -63,7 +63,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
63 * pcie_portdrv_probe - Probe PCI-Express port devices 63 * pcie_portdrv_probe - Probe PCI-Express port devices
64 * @dev: PCI-Express port device being probed 64 * @dev: PCI-Express port device being probed
65 * 65 *
66 * If detected invokes the pcie_port_device_register() method for 66 * If detected invokes the pcie_port_device_register() method for
67 * this port device. 67 * this port device.
68 * 68 *
69 */ 69 */
@@ -78,7 +78,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
78 (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) 78 (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
79 return -ENODEV; 79 return -ENODEV;
80 80
81 if (!dev->irq && dev->pin) { 81 if (!dev->irq && dev->pin) {
82 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " 82 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
83 "check vendor BIOS\n", dev->vendor, dev->device); 83 "check vendor BIOS\n", dev->vendor, dev->device);
84 } 84 }
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
91 return 0; 91 return 0;
92} 92}
93 93
94static void pcie_portdrv_remove (struct pci_dev *dev) 94static void pcie_portdrv_remove(struct pci_dev *dev)
95{ 95{
96 pcie_port_device_remove(dev); 96 pcie_port_device_remove(dev);
97 pci_disable_device(dev); 97 pci_disable_device(dev);
@@ -129,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data)
129static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, 129static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
130 enum pci_channel_state error) 130 enum pci_channel_state error)
131{ 131{
132 struct aer_broadcast_data result_data = 132 struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
133 {error, PCI_ERS_RESULT_CAN_RECOVER}; 133 int ret;
134 int retval;
135 134
136 /* can not fail */ 135 /* can not fail */
137 retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter); 136 ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
138 137
139 return result_data.result; 138 return data.result;
140} 139}
141 140
142static int mmio_enabled_iter(struct device *device, void *data) 141static int mmio_enabled_iter(struct device *device, void *data)
@@ -290,7 +289,7 @@ static int __init pcie_portdrv_init(void)
290 return retval; 289 return retval;
291} 290}
292 291
293static void __exit pcie_portdrv_exit(void) 292static void __exit pcie_portdrv_exit(void)
294{ 293{
295 pci_unregister_driver(&pcie_portdriver); 294 pci_unregister_driver(&pcie_portdriver);
296 pcie_port_bus_unregister(); 295 pcie_port_bus_unregister();
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 98ffb2de22e9..446e4a94d7d3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -681,7 +681,7 @@ static void pci_read_irq(struct pci_dev *dev)
681 dev->irq = irq; 681 dev->irq = irq;
682} 682}
683 683
684static void set_pcie_port_type(struct pci_dev *pdev) 684void set_pcie_port_type(struct pci_dev *pdev)
685{ 685{
686 int pos; 686 int pos;
687 u16 reg16; 687 u16 reg16;
@@ -695,7 +695,7 @@ static void set_pcie_port_type(struct pci_dev *pdev)
695 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 695 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
696} 696}
697 697
698static void set_pcie_hotplug_bridge(struct pci_dev *pdev) 698void set_pcie_hotplug_bridge(struct pci_dev *pdev)
699{ 699{
700 int pos; 700 int pos;
701 u16 reg16; 701 u16 reg16;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7cfa7c38d318..d58b94030ef3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
340 340
341/*
342 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
343 * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
344 * BAR0 should be 8 bytes; instead, it may be set to something like 8k
345 * (which conflicts w/ BAR1's memory range).
346 */
347static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
348{
349 if (pci_resource_len(dev, 0) != 8) {
350 struct resource *res = &dev->resource[0];
351 res->end = res->start + 8 - 1;
352 dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
353 "(incorrect header); workaround applied.\n");
354 }
355}
356DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
357
341static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, 358static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
342 unsigned size, int nr, const char *name) 359 unsigned size, int nr, const char *name)
343{ 360{
@@ -2629,14 +2646,86 @@ static int __init pci_apply_final_quirks(void)
2629 if (!pci_cache_line_size) { 2646 if (!pci_cache_line_size) {
2630 printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", 2647 printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
2631 cls << 2, pci_dfl_cache_line_size << 2); 2648 cls << 2, pci_dfl_cache_line_size << 2);
2632 pci_cache_line_size = cls; 2649 pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
2633 } 2650 }
2634 2651
2635 return 0; 2652 return 0;
2636} 2653}
2637 2654
2638fs_initcall_sync(pci_apply_final_quirks); 2655fs_initcall_sync(pci_apply_final_quirks);
2656
2657/*
 2658 * The following are device-specific reset methods which can be used to
2659 * reset a single function if other methods (e.g. FLR, PM D0->D3) are
2660 * not available.
2661 */
2662static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
2663{
2664 int pos;
2665
2666 /* only implement PCI_CLASS_SERIAL_USB at present */
2667 if (dev->class == PCI_CLASS_SERIAL_USB) {
2668 pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
2669 if (!pos)
2670 return -ENOTTY;
2671
2672 if (probe)
2673 return 0;
2674
2675 pci_write_config_byte(dev, pos + 0x4, 1);
2676 msleep(100);
2677
2678 return 0;
2679 } else {
2680 return -ENOTTY;
2681 }
2682}
2683
2684static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
2685{
2686 int pos;
2687
2688 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
2689 if (!pos)
2690 return -ENOTTY;
2691
2692 if (probe)
2693 return 0;
2694
2695 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
2696 PCI_EXP_DEVCTL_BCR_FLR);
2697 msleep(100);
2698
2699 return 0;
2700}
2701
2702#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
2703
2704static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
2705 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
2706 reset_intel_82599_sfp_virtfn },
2707 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
2708 reset_intel_generic_dev },
2709 { 0 }
2710};
2711
2712int pci_dev_specific_reset(struct pci_dev *dev, int probe)
2713{
2714 const struct pci_dev_reset_methods *i;
2715
2716 for (i = pci_dev_reset_methods; i->reset; i++) {
2717 if ((i->vendor == dev->vendor ||
2718 i->vendor == (u16)PCI_ANY_ID) &&
2719 (i->device == dev->device ||
2720 i->device == (u16)PCI_ANY_ID))
2721 return i->reset(dev, probe);
2722 }
2723
2724 return -ENOTTY;
2725}
2726
2639#else 2727#else
2640void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} 2728void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
2729int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; }
2641#endif 2730#endif
2642EXPORT_SYMBOL(pci_fixup_device); 2731EXPORT_SYMBOL(pci_fixup_device);
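pci_dev_specific_reset() walks pci_dev_reset_methods[] and calls the first entry whose vendor and device IDs match, with PCI_ANY_ID acting as a wildcard, so supporting another device only requires a callback with the same probe/reset contract plus a table entry ahead of the generic catch-alls. A hypothetical sketch (the IDs, register offset and delay are placeholders, not part of this patch):

static int reset_example_nic(struct pci_dev *dev, int probe)
{
	if (probe)	/* probe != 0: only report that a method exists */
		return 0;

	/* the device-specific reset sequence would go here */
	pci_write_config_byte(dev, 0x40, 0x01);	/* placeholder register */
	msleep(10);

	return 0;
}

/* ...listed before the { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, ... } entry:
 *	{ 0xabcd, 0x0001, reset_example_nic },
 */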
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 6dae87143258..4a471dc4f4b9 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -15,9 +15,9 @@
15 15
16DECLARE_RWSEM(pci_bus_sem); 16DECLARE_RWSEM(pci_bus_sem);
17/* 17/*
18 * find the upstream PCIE-to-PCI bridge of a PCI device 18 * find the upstream PCIe-to-PCI bridge of a PCI device
19 * if the device is PCIE, return NULL 19 * if the device is PCIE, return NULL
20 * if the device isn't connected to a PCIE bridge (that is its parent is a 20 * if the device isn't connected to a PCIe bridge (that is its parent is a
21 * legacy PCI bridge and the bridge is directly connected to bus 0), return its 21 * legacy PCI bridge and the bridge is directly connected to bus 0), return its
22 * parent 22 * parent
23 */ 23 */
@@ -37,7 +37,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
37 tmp = pdev; 37 tmp = pdev;
38 continue; 38 continue;
39 } 39 }
40 /* PCI device should connect to a PCIE bridge */ 40 /* PCI device should connect to a PCIe bridge */
41 if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { 41 if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
42 /* Busted hardware? */ 42 /* Busted hardware? */
43 WARN_ON_ONCE(1); 43 WARN_ON_ONCE(1);
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index cdf50f3bc2df..d99f846451a3 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -222,7 +222,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
222 unsigned int max, pass; 222 unsigned int max, pass;
223 223
224 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); 224 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
225/* pcibios_fixup_bus(bus); */ 225 pci_fixup_cardbus(bus);
226 226
227 max = bus->secondary; 227 max = bus->secondary;
228 for (pass = 0; pass < 2; pass++) 228 for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index de6bc333d299..db79ca61cf96 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -21,11 +21,18 @@
21 21
22#include "sa1111_generic.h" 22#include "sa1111_generic.h"
23 23
24#define IDX_IRQ_S0_READY_NINT (0)
25#define IDX_IRQ_S0_CD_VALID (1)
26#define IDX_IRQ_S0_BVD1_STSCHG (2)
27#define IDX_IRQ_S1_READY_NINT (3)
28#define IDX_IRQ_S1_CD_VALID (4)
29#define IDX_IRQ_S1_BVD1_STSCHG (5)
30
24static struct pcmcia_irqs irqs[] = { 31static struct pcmcia_irqs irqs[] = {
25 { 0, IRQ_S0_CD_VALID, "SA1111 PCMCIA card detect" }, 32 { 0, NO_IRQ, "SA1111 PCMCIA card detect" },
26 { 0, IRQ_S0_BVD1_STSCHG, "SA1111 PCMCIA BVD1" }, 33 { 0, NO_IRQ, "SA1111 PCMCIA BVD1" },
27 { 1, IRQ_S1_CD_VALID, "SA1111 CF card detect" }, 34 { 1, NO_IRQ, "SA1111 CF card detect" },
28 { 1, IRQ_S1_BVD1_STSCHG, "SA1111 CF BVD1" }, 35 { 1, NO_IRQ, "SA1111 CF BVD1" },
29}; 36};
30 37
31static int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 38static int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -136,7 +143,9 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
136 s->soc.ops = ops; 143 s->soc.ops = ops;
137 s->soc.socket.owner = ops->owner; 144 s->soc.socket.owner = ops->owner;
138 s->soc.socket.dev.parent = &dev->dev; 145 s->soc.socket.dev.parent = &dev->dev;
139 s->soc.socket.pci_irq = s->soc.nr ? IRQ_S1_READY_NINT : IRQ_S0_READY_NINT; 146 s->soc.socket.pci_irq = s->soc.nr ?
147 dev->irq[IDX_IRQ_S0_READY_NINT] :
148 dev->irq[IDX_IRQ_S1_READY_NINT];
140 s->dev = dev; 149 s->dev = dev;
141 150
142 ret = add(&s->soc); 151 ret = add(&s->soc);
@@ -162,6 +171,12 @@ static int pcmcia_probe(struct sa1111_dev *dev)
162 171
163 base = dev->mapbase; 172 base = dev->mapbase;
164 173
174 /* Initialize PCMCIA IRQs */
175 irqs[0].irq = dev->irq[IDX_IRQ_S0_CD_VALID];
176 irqs[1].irq = dev->irq[IDX_IRQ_S0_BVD1_STSCHG];
177 irqs[2].irq = dev->irq[IDX_IRQ_S1_CD_VALID];
178 irqs[3].irq = dev->irq[IDX_IRQ_S1_BVD1_STSCHG];
179
165 /* 180 /*
166 * Initialise the suspend state. 181 * Initialise the suspend state.
167 */ 182 */
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ec4faffe6b05..f526e735c5ab 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -231,8 +231,36 @@ config THINKPAD_ACPI
231 231
232 This driver was formerly known as ibm-acpi. 232 This driver was formerly known as ibm-acpi.
233 233
234 Extra functionality will be available if the rfkill (CONFIG_RFKILL)
235 and/or ALSA (CONFIG_SND) subsystems are available in the kernel.
236 Note that if you want ThinkPad-ACPI to be built-in instead of
237 modular, ALSA and rfkill will also have to be built-in.
238
234 If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. 239 If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
235 240
241config THINKPAD_ACPI_ALSA_SUPPORT
242 bool "Console audio control ALSA interface"
243 depends on THINKPAD_ACPI
244 depends on SND
245 depends on SND = y || THINKPAD_ACPI = SND
246 default y
247 ---help---
248 Enables monitoring of the built-in console audio output control
249 (headphone and speakers), which is operated by the mute and (in
250 some ThinkPad models) volume hotkeys.
251
252 If this option is enabled, ThinkPad-ACPI will export an ALSA card
253 with a single read-only mixer control, which should be used for
254 on-screen-display feedback purposes by the Desktop Environment.
255
256 Optionally, the driver will also allow software control (the
257 ALSA mixer will be made read-write). Please refer to the driver
258 documentation for details.
259
260 All IBM models have both volume and mute control. Newer Lenovo
261 models only have mute control (the volume hotkeys are just normal
262 keys and volume control is done through the main HDA mixer).
263
236config THINKPAD_ACPI_DEBUGFACILITIES 264config THINKPAD_ACPI_DEBUGFACILITIES
237 bool "Maintainer debug facilities" 265 bool "Maintainer debug facilities"
238 depends on THINKPAD_ACPI 266 depends on THINKPAD_ACPI
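The dependency line of the new option, "depends on SND = y || THINKPAD_ACPI = SND", combined with "depends on SND", works out as follows: SND=y permits thinkpad-acpi to be built-in or modular, SND=m is only accepted when THINKPAD_ACPI is also =m, and the one combination that cannot link, THINKPAD_ACPI=y with SND=m, is rejected; this is the same constraint spelled out in the help text added to THINKPAD_ACPI above.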
@@ -336,6 +364,7 @@ config EEEPC_LAPTOP
336 select HWMON 364 select HWMON
337 select LEDS_CLASS 365 select LEDS_CLASS
338 select NEW_LEDS 366 select NEW_LEDS
367 select INPUT_SPARSEKMAP
339 ---help--- 368 ---help---
340 This driver supports the Fn-Fx keys on Eee PC laptops. 369 This driver supports the Fn-Fx keys on Eee PC laptops.
341 370
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 916ccb2b316c..1b1dddbd5744 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -202,8 +202,13 @@ static void dell_wmi_notify(u32 value, void *context)
202 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; 202 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
203 static struct key_entry *key; 203 static struct key_entry *key;
204 union acpi_object *obj; 204 union acpi_object *obj;
205 acpi_status status;
205 206
206 wmi_get_event_data(value, &response); 207 status = wmi_get_event_data(value, &response);
208 if (status != AE_OK) {
209 printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
210 return;
211 }
207 212
208 obj = (union acpi_object *)response.pointer; 213 obj = (union acpi_object *)response.pointer;
209 214
@@ -323,8 +328,9 @@ static int __init dell_wmi_input_setup(void)
323static int __init dell_wmi_init(void) 328static int __init dell_wmi_init(void)
324{ 329{
325 int err; 330 int err;
331 acpi_status status;
326 332
327 if (wmi_has_guid(DELL_EVENT_GUID)) { 333 if (!wmi_has_guid(DELL_EVENT_GUID)) {
328 printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n"); 334 printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
329 return -ENODEV; 335 return -ENODEV;
330 } 336 }
@@ -336,14 +342,14 @@ static int __init dell_wmi_init(void)
336 if (err) 342 if (err)
337 return err; 343 return err;
338 344
339 err = wmi_install_notify_handler(DELL_EVENT_GUID, 345 status = wmi_install_notify_handler(DELL_EVENT_GUID,
340 dell_wmi_notify, NULL); 346 dell_wmi_notify, NULL);
341 if (err) { 347 if (ACPI_FAILURE(status)) {
342 input_unregister_device(dell_wmi_input_dev); 348 input_unregister_device(dell_wmi_input_dev);
343 printk(KERN_ERR 349 printk(KERN_ERR
344 "dell-wmi: Unable to register notify handler - %d\n", 350 "dell-wmi: Unable to register notify handler - %d\n",
345 err); 351 status);
346 return err; 352 return -ENODEV;
347 } 353 }
348 354
349 return 0; 355 return 0;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 5838c69b2fb3..e2be6bb33d92 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -31,10 +31,12 @@
31#include <acpi/acpi_bus.h> 31#include <acpi/acpi_bus.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33#include <linux/input.h> 33#include <linux/input.h>
34#include <linux/input/sparse-keymap.h>
34#include <linux/rfkill.h> 35#include <linux/rfkill.h>
35#include <linux/pci.h> 36#include <linux/pci.h>
36#include <linux/pci_hotplug.h> 37#include <linux/pci_hotplug.h>
37#include <linux/leds.h> 38#include <linux/leds.h>
39#include <linux/dmi.h>
38 40
39#define EEEPC_LAPTOP_VERSION "0.1" 41#define EEEPC_LAPTOP_VERSION "0.1"
40#define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver" 42#define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver"
@@ -48,6 +50,14 @@ MODULE_AUTHOR("Corentin Chary, Eric Cooper");
48MODULE_DESCRIPTION(EEEPC_LAPTOP_NAME); 50MODULE_DESCRIPTION(EEEPC_LAPTOP_NAME);
49MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
50 52
53static bool hotplug_disabled;
54
55module_param(hotplug_disabled, bool, 0644);
56MODULE_PARM_DESC(hotplug_disabled,
57 "Disable hotplug for wireless device. "
58 "If your laptop need that, please report to "
59 "acpi4asus-user@lists.sourceforge.net.");
60
51/* 61/*
52 * Definitions for Asus EeePC 62 * Definitions for Asus EeePC
53 */ 63 */
@@ -120,38 +130,28 @@ static const char *cm_setv[] = {
120 NULL, NULL, "PBPS", "TPDS" 130 NULL, NULL, "PBPS", "TPDS"
121}; 131};
122 132
123struct key_entry {
124 char type;
125 u8 code;
126 u16 keycode;
127};
128
129enum { KE_KEY, KE_END };
130
131static const struct key_entry eeepc_keymap[] = { 133static const struct key_entry eeepc_keymap[] = {
132 /* Sleep already handled via generic ACPI code */ 134 { KE_KEY, 0x10, { KEY_WLAN } },
133 {KE_KEY, 0x10, KEY_WLAN }, 135 { KE_KEY, 0x11, { KEY_WLAN } },
134 {KE_KEY, 0x11, KEY_WLAN }, 136 { KE_KEY, 0x12, { KEY_PROG1 } },
135 {KE_KEY, 0x12, KEY_PROG1 }, 137 { KE_KEY, 0x13, { KEY_MUTE } },
136 {KE_KEY, 0x13, KEY_MUTE }, 138 { KE_KEY, 0x14, { KEY_VOLUMEDOWN } },
137 {KE_KEY, 0x14, KEY_VOLUMEDOWN }, 139 { KE_KEY, 0x15, { KEY_VOLUMEUP } },
138 {KE_KEY, 0x15, KEY_VOLUMEUP }, 140 { KE_KEY, 0x16, { KEY_DISPLAY_OFF } },
139 {KE_KEY, 0x16, KEY_DISPLAY_OFF }, 141 { KE_KEY, 0x1a, { KEY_COFFEE } },
140 {KE_KEY, 0x1a, KEY_COFFEE }, 142 { KE_KEY, 0x1b, { KEY_ZOOM } },
141 {KE_KEY, 0x1b, KEY_ZOOM }, 143 { KE_KEY, 0x1c, { KEY_PROG2 } },
142 {KE_KEY, 0x1c, KEY_PROG2 }, 144 { KE_KEY, 0x1d, { KEY_PROG3 } },
143 {KE_KEY, 0x1d, KEY_PROG3 }, 145 { KE_KEY, NOTIFY_BRN_MIN, { KEY_BRIGHTNESSDOWN } },
144 {KE_KEY, NOTIFY_BRN_MIN, KEY_BRIGHTNESSDOWN }, 146 { KE_KEY, NOTIFY_BRN_MAX, { KEY_BRIGHTNESSUP } },
145 {KE_KEY, NOTIFY_BRN_MAX, KEY_BRIGHTNESSUP }, 147 { KE_KEY, 0x30, { KEY_SWITCHVIDEOMODE } },
146 {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE }, 148 { KE_KEY, 0x31, { KEY_SWITCHVIDEOMODE } },
147 {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE }, 149 { KE_KEY, 0x32, { KEY_SWITCHVIDEOMODE } },
148 {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE }, 150 { KE_KEY, 0x37, { KEY_F13 } }, /* Disable Touchpad */
149 {KE_KEY, 0x37, KEY_F13 }, /* Disable Touchpad */ 151 { KE_KEY, 0x38, { KEY_F14 } },
150 {KE_KEY, 0x38, KEY_F14 }, 152 { KE_END, 0 },
151 {KE_END, 0},
152}; 153};
153 154
154
155/* 155/*
156 * This is the main structure, we can use it to store useful information 156 * This is the main structure, we can use it to store useful information
157 */ 157 */
@@ -159,6 +159,8 @@ struct eeepc_laptop {
159 acpi_handle handle; /* the handle of the acpi device */ 159 acpi_handle handle; /* the handle of the acpi device */
160 u32 cm_supported; /* the control methods supported 160 u32 cm_supported; /* the control methods supported
161 by this BIOS */ 161 by this BIOS */
162 bool cpufv_disabled;
163 bool hotplug_disabled;
162 u16 event_count[128]; /* count for each event */ 164 u16 event_count[128]; /* count for each event */
163 165
164 struct platform_device *platform_device; 166 struct platform_device *platform_device;
@@ -378,6 +380,8 @@ static ssize_t store_cpufv(struct device *dev,
378 struct eeepc_cpufv c; 380 struct eeepc_cpufv c;
379 int rv, value; 381 int rv, value;
380 382
383 if (eeepc->cpufv_disabled)
384 return -EPERM;
381 if (get_cpufv(eeepc, &c)) 385 if (get_cpufv(eeepc, &c))
382 return -ENODEV; 386 return -ENODEV;
383 rv = parse_arg(buf, count, &value); 387 rv = parse_arg(buf, count, &value);
@@ -389,6 +393,41 @@ static ssize_t store_cpufv(struct device *dev,
389 return rv; 393 return rv;
390} 394}
391 395
396static ssize_t show_cpufv_disabled(struct device *dev,
397 struct device_attribute *attr,
398 char *buf)
399{
400 struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
401
402 return sprintf(buf, "%d\n", eeepc->cpufv_disabled);
403}
404
405static ssize_t store_cpufv_disabled(struct device *dev,
406 struct device_attribute *attr,
407 const char *buf, size_t count)
408{
409 struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
410 int rv, value;
411
412 rv = parse_arg(buf, count, &value);
413 if (rv < 0)
414 return rv;
415
416 switch (value) {
417 case 0:
418 if (eeepc->cpufv_disabled)
419 pr_warning("cpufv enabled (not officially supported "
420 "on this model)\n");
421 eeepc->cpufv_disabled = false;
422 return rv;
423 case 1:
424 return -EPERM;
425 default:
426 return -EINVAL;
427 }
428}
429
430
392static struct device_attribute dev_attr_cpufv = { 431static struct device_attribute dev_attr_cpufv = {
393 .attr = { 432 .attr = {
394 .name = "cpufv", 433 .name = "cpufv",
@@ -404,12 +443,22 @@ static struct device_attribute dev_attr_available_cpufv = {
404 .show = show_available_cpufv 443 .show = show_available_cpufv
405}; 444};
406 445
446static struct device_attribute dev_attr_cpufv_disabled = {
447 .attr = {
448 .name = "cpufv_disabled",
449 .mode = 0644 },
450 .show = show_cpufv_disabled,
451 .store = store_cpufv_disabled
452};
453
454
407static struct attribute *platform_attributes[] = { 455static struct attribute *platform_attributes[] = {
408 &dev_attr_camera.attr, 456 &dev_attr_camera.attr,
409 &dev_attr_cardr.attr, 457 &dev_attr_cardr.attr,
410 &dev_attr_disp.attr, 458 &dev_attr_disp.attr,
411 &dev_attr_cpufv.attr, 459 &dev_attr_cpufv.attr,
412 &dev_attr_available_cpufv.attr, 460 &dev_attr_available_cpufv.attr,
461 &dev_attr_cpufv_disabled.attr,
413 NULL 462 NULL
414}; 463};
415 464
@@ -796,6 +845,9 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
796 if (result && result != -ENODEV) 845 if (result && result != -ENODEV)
797 goto exit; 846 goto exit;
798 847
848 if (eeepc->hotplug_disabled)
849 return 0;
850
799 result = eeepc_setup_pci_hotplug(eeepc); 851 result = eeepc_setup_pci_hotplug(eeepc);
800 /* 852 /*
801 * If we get -EBUSY then something else is handling the PCI hotplug - 853 * If we get -EBUSY then something else is handling the PCI hotplug -
@@ -1090,120 +1142,42 @@ static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
1090/* 1142/*
1091 * Input device (i.e. hotkeys) 1143 * Input device (i.e. hotkeys)
1092 */ 1144 */
1093static struct key_entry *eeepc_get_entry_by_scancode( 1145static int eeepc_input_init(struct eeepc_laptop *eeepc)
1094 struct eeepc_laptop *eeepc,
1095 int code)
1096{ 1146{
1097 struct key_entry *key; 1147 struct input_dev *input;
1148 int error;
1098 1149
1099 for (key = eeepc->keymap; key->type != KE_END; key++) 1150 input = input_allocate_device();
1100 if (code == key->code) 1151 if (!input) {
1101 return key; 1152 pr_info("Unable to allocate input device\n");
1102 1153 return -ENOMEM;
1103 return NULL;
1104}
1105
1106static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
1107{
1108 static struct key_entry *key;
1109
1110 key = eeepc_get_entry_by_scancode(eeepc, event);
1111 if (key) {
1112 switch (key->type) {
1113 case KE_KEY:
1114 input_report_key(eeepc->inputdev, key->keycode,
1115 1);
1116 input_sync(eeepc->inputdev);
1117 input_report_key(eeepc->inputdev, key->keycode,
1118 0);
1119 input_sync(eeepc->inputdev);
1120 break;
1121 }
1122 } 1154 }
1123}
1124
1125static struct key_entry *eeepc_get_entry_by_keycode(
1126 struct eeepc_laptop *eeepc, int code)
1127{
1128 struct key_entry *key;
1129
1130 for (key = eeepc->keymap; key->type != KE_END; key++)
1131 if (code == key->keycode && key->type == KE_KEY)
1132 return key;
1133 1155
1134 return NULL; 1156 input->name = "Asus EeePC extra buttons";
1135} 1157 input->phys = EEEPC_LAPTOP_FILE "/input0";
1158 input->id.bustype = BUS_HOST;
1159 input->dev.parent = &eeepc->platform_device->dev;
1136 1160
1137static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode) 1161 error = sparse_keymap_setup(input, eeepc_keymap, NULL);
1138{ 1162 if (error) {
1139 struct eeepc_laptop *eeepc = input_get_drvdata(dev); 1163 pr_err("Unable to setup input device keymap\n");
1140 struct key_entry *key = eeepc_get_entry_by_scancode(eeepc, scancode); 1164 goto err_free_dev;
1141
1142 if (key && key->type == KE_KEY) {
1143 *keycode = key->keycode;
1144 return 0;
1145 } 1165 }
1146 1166
1147 return -EINVAL; 1167 error = input_register_device(input);
1148} 1168 if (error) {
1149 1169 pr_err("Unable to register input device\n");
1150static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode) 1170 goto err_free_keymap;
1151{
1152 struct eeepc_laptop *eeepc = input_get_drvdata(dev);
1153 struct key_entry *key;
1154 int old_keycode;
1155
1156 if (keycode < 0 || keycode > KEY_MAX)
1157 return -EINVAL;
1158
1159 key = eeepc_get_entry_by_scancode(eeepc, scancode);
1160 if (key && key->type == KE_KEY) {
1161 old_keycode = key->keycode;
1162 key->keycode = keycode;
1163 set_bit(keycode, dev->keybit);
1164 if (!eeepc_get_entry_by_keycode(eeepc, old_keycode))
1165 clear_bit(old_keycode, dev->keybit);
1166 return 0;
1167 } 1171 }
1168 1172
1169 return -EINVAL; 1173 eeepc->inputdev = input;
1170}
1171
1172static int eeepc_input_init(struct eeepc_laptop *eeepc)
1173{
1174 const struct key_entry *key;
1175 int result;
1176
1177 eeepc->inputdev = input_allocate_device();
1178 if (!eeepc->inputdev) {
1179 pr_info("Unable to allocate input device\n");
1180 return -ENOMEM;
1181 }
1182 eeepc->inputdev->name = "Asus EeePC extra buttons";
1183 eeepc->inputdev->dev.parent = &eeepc->platform_device->dev;
1184 eeepc->inputdev->phys = EEEPC_LAPTOP_FILE "/input0";
1185 eeepc->inputdev->id.bustype = BUS_HOST;
1186 eeepc->inputdev->getkeycode = eeepc_getkeycode;
1187 eeepc->inputdev->setkeycode = eeepc_setkeycode;
1188 input_set_drvdata(eeepc->inputdev, eeepc);
1189
1190 eeepc->keymap = kmemdup(eeepc_keymap, sizeof(eeepc_keymap),
1191 GFP_KERNEL);
1192 for (key = eeepc_keymap; key->type != KE_END; key++) {
1193 switch (key->type) {
1194 case KE_KEY:
1195 set_bit(EV_KEY, eeepc->inputdev->evbit);
1196 set_bit(key->keycode, eeepc->inputdev->keybit);
1197 break;
1198 }
1199 }
1200 result = input_register_device(eeepc->inputdev);
1201 if (result) {
1202 pr_info("Unable to register input device\n");
1203 input_free_device(eeepc->inputdev);
1204 return result;
1205 }
1206 return 0; 1174 return 0;
1175
1176 err_free_keymap:
1177 sparse_keymap_free(input);
1178 err_free_dev:
1179 input_free_device(input);
1180 return error;
1207} 1181}
1208 1182
1209static void eeepc_input_exit(struct eeepc_laptop *eeepc) 1183static void eeepc_input_exit(struct eeepc_laptop *eeepc)
@@ -1253,11 +1227,59 @@ static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
1253 * event will be desired value (or else ignored) 1227 * event will be desired value (or else ignored)
1254 */ 1228 */
1255 } 1229 }
1256 eeepc_input_notify(eeepc, event); 1230 sparse_keymap_report_event(eeepc->inputdev, event,
1231 1, true);
1257 } 1232 }
1258 } else { 1233 } else {
1259 /* Everything else is a bona-fide keypress event */ 1234 /* Everything else is a bona-fide keypress event */
1260 eeepc_input_notify(eeepc, event); 1235 sparse_keymap_report_event(eeepc->inputdev, event, 1, true);
1236 }
1237}
1238
1239static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
1240{
1241 const char *model;
1242
1243 model = dmi_get_system_info(DMI_PRODUCT_NAME);
1244 if (!model)
1245 return;
1246
1247 /*
1248 * Blacklist for setting cpufv (cpu speed).
1249 *
1250 * EeePC 4G ("701") implements CFVS, but it is not supported
1251 * by the pre-installed OS, and the original option to change it
1252 * in the BIOS setup screen was removed in later versions.
1253 *
1254 * Judging by the lack of "Super Hybrid Engine" on Asus product pages,
1255 * this applies to all "701" models (4G/4G Surf/2G Surf).
1256 *
1257 * So Asus made a deliberate decision not to support it on this model.
 1258 * We have several reports that using it can cause the system to hang.
1259 *
1260 * The hang has also been reported on a "702" (Model name "8G"?).
1261 *
1262 * We avoid dmi_check_system() / dmi_match(), because they use
1263 * substring matching. We don't want to affect the "701SD"
1264 * and "701SDX" models, because they do support S.H.E.
1265 */
1266 if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
1267 eeepc->cpufv_disabled = true;
1268 pr_info("model %s does not officially support setting cpu "
1269 "speed\n", model);
1270 pr_info("cpufv disabled to avoid instability\n");
1271 }
1272
1273 /*
1274 * Blacklist for wlan hotplug
1275 *
 1276 * Eeepc 1005HA doesn't work like other models and doesn't need the
 1277 * hotplug code. In fact, the current hotplug code seems to unplug another
1278 * device...
1279 */
1280 if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) {
1281 eeepc->hotplug_disabled = true;
1282 pr_info("wlan hotplug disabled\n");
1261 } 1283 }
1262} 1284}
1263 1285
@@ -1342,6 +1364,10 @@ static int __devinit eeepc_acpi_add(struct acpi_device *device)
1342 strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS); 1364 strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
1343 device->driver_data = eeepc; 1365 device->driver_data = eeepc;
1344 1366
1367 eeepc->hotplug_disabled = hotplug_disabled;
1368
1369 eeepc_dmi_check(eeepc);
1370
1345 result = eeepc_acpi_init(eeepc, device); 1371 result = eeepc_acpi_init(eeepc, device);
1346 if (result) 1372 if (result)
1347 goto fail_platform; 1373 goto fail_platform;
@@ -1452,10 +1478,12 @@ static int __init eeepc_laptop_init(void)
1452 result = acpi_bus_register_driver(&eeepc_acpi_driver); 1478 result = acpi_bus_register_driver(&eeepc_acpi_driver);
1453 if (result < 0) 1479 if (result < 0)
1454 goto fail_acpi_driver; 1480 goto fail_acpi_driver;
1481
1455 if (!eeepc_device_present) { 1482 if (!eeepc_device_present) {
1456 result = -ENODEV; 1483 result = -ENODEV;
1457 goto fail_no_device; 1484 goto fail_no_device;
1458 } 1485 }
1486
1459 return 0; 1487 return 0;
1460 1488
1461fail_no_device: 1489fail_no_device:
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 8781d8fa7a57..ad4c414dbfbc 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -338,8 +338,13 @@ static void hp_wmi_notify(u32 value, void *context)
338 static struct key_entry *key; 338 static struct key_entry *key;
339 union acpi_object *obj; 339 union acpi_object *obj;
340 int eventcode; 340 int eventcode;
341 acpi_status status;
341 342
342 wmi_get_event_data(value, &response); 343 status = wmi_get_event_data(value, &response);
344 if (status != AE_OK) {
345 printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status);
346 return;
347 }
343 348
344 obj = (union acpi_object *)response.pointer; 349 obj = (union acpi_object *)response.pointer;
345 350
@@ -388,8 +393,6 @@ static void hp_wmi_notify(u32 value, void *context)
388 } else 393 } else
389 printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", 394 printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
390 eventcode); 395 eventcode);
391
392 kfree(obj);
393} 396}
394 397
395static int __init hp_wmi_input_setup(void) 398static int __init hp_wmi_input_setup(void)
@@ -581,7 +584,7 @@ static int __init hp_wmi_init(void)
581 if (wmi_has_guid(HPWMI_EVENT_GUID)) { 584 if (wmi_has_guid(HPWMI_EVENT_GUID)) {
582 err = wmi_install_notify_handler(HPWMI_EVENT_GUID, 585 err = wmi_install_notify_handler(HPWMI_EVENT_GUID,
583 hp_wmi_notify, NULL); 586 hp_wmi_notify, NULL);
584 if (!err) 587 if (ACPI_SUCCESS(err))
585 hp_wmi_input_setup(); 588 hp_wmi_input_setup();
586 } 589 }
587 590
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 7f77f908bb01..f5f70d4c6913 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -149,8 +149,13 @@ static void msi_wmi_notify(u32 value, void *context)
149 static struct key_entry *key; 149 static struct key_entry *key;
150 union acpi_object *obj; 150 union acpi_object *obj;
151 ktime_t cur; 151 ktime_t cur;
152 acpi_status status;
152 153
153 wmi_get_event_data(value, &response); 154 status = wmi_get_event_data(value, &response);
155 if (status != AE_OK) {
156 printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status);
157 return;
158 }
154 159
155 obj = (union acpi_object *)response.pointer; 160 obj = (union acpi_object *)response.pointer;
156 161
@@ -236,7 +241,7 @@ static int __init msi_wmi_init(void)
236 } 241 }
237 err = wmi_install_notify_handler(MSIWMI_EVENT_GUID, 242 err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
238 msi_wmi_notify, NULL); 243 msi_wmi_notify, NULL);
239 if (err) 244 if (ACPI_FAILURE(err))
240 return -EINVAL; 245 return -EINVAL;
241 246
242 err = msi_wmi_input_setup(); 247 err = msi_wmi_input_setup();
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 5af53340da6f..3f71a605a492 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1201,9 +1201,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device)
1201 /* the buffer is filled with magic numbers describing the devices 1201 /* the buffer is filled with magic numbers describing the devices
1202 * available, 0xff terminates the enumeration 1202 * available, 0xff terminates the enumeration
1203 */ 1203 */
1204 while ((dev_code = *(device_enum->buffer.pointer + i)) != 0xff && 1204 for (i = 0; i < device_enum->buffer.length; i++) {
1205 i < device_enum->buffer.length) { 1205
1206 i++; 1206 dev_code = *(device_enum->buffer.pointer + i);
1207 if (dev_code == 0xff)
1208 break;
1209
1207 dprintk("Radio devices, looking at 0x%.2x\n", dev_code); 1210 dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
1208 1211
1209 if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI]) 1212 if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 448c8aeb166b..eb603f1d55ca 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
5771 case TPACPI_THERMAL_ACPI_TMP07: 5771 case TPACPI_THERMAL_ACPI_TMP07:
5772 case TPACPI_THERMAL_ACPI_UPDT: 5772 case TPACPI_THERMAL_ACPI_UPDT:
5773 sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, 5773 sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
5774 &thermal_temp_input16_group); 5774 &thermal_temp_input8_group);
5775 break; 5775 break;
5776 case TPACPI_THERMAL_NONE: 5776 case TPACPI_THERMAL_NONE:
5777 default: 5777 default:
@@ -6384,11 +6384,13 @@ static struct ibm_struct brightness_driver_data = {
6384 * and we leave them unchanged. 6384 * and we leave them unchanged.
6385 */ 6385 */
6386 6386
6387#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
6388
6387#define TPACPI_ALSA_DRVNAME "ThinkPad EC" 6389#define TPACPI_ALSA_DRVNAME "ThinkPad EC"
6388#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control" 6390#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
6389#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME 6391#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
6390 6392
6391static int alsa_index = SNDRV_DEFAULT_IDX1; 6393static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */
6392static char *alsa_id = "ThinkPadEC"; 6394static char *alsa_id = "ThinkPadEC";
6393static int alsa_enable = SNDRV_DEFAULT_ENABLE1; 6395static int alsa_enable = SNDRV_DEFAULT_ENABLE1;
6394 6396
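As a worked example of the new default index: with SNDRV_CARDS = 32, ~((1 << 29) - 1) evaluates to 0xE0000000, a negative value that the ALSA core of this era treats as a bitmask of permitted card slots, so the control-only card is confined to the last three indices (29-31) rather than claiming index 0 ahead of a real sound card, as the "last three slots" comment indicates.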
@@ -6705,10 +6707,11 @@ static int __init volume_create_alsa_mixer(void)
6705 6707
6706 rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE, 6708 rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
6707 sizeof(struct tpacpi_alsa_data), &card); 6709 sizeof(struct tpacpi_alsa_data), &card);
6708 if (rc < 0) 6710 if (rc < 0 || !card) {
6709 return rc; 6711 printk(TPACPI_ERR
6710 if (!card) 6712 "Failed to create ALSA card structures: %d\n", rc);
6711 return -ENOMEM; 6713 return 1;
6714 }
6712 6715
6713 BUG_ON(!card->private_data); 6716 BUG_ON(!card->private_data);
6714 data = card->private_data; 6717 data = card->private_data;
@@ -6741,8 +6744,9 @@ static int __init volume_create_alsa_mixer(void)
6741 rc = snd_ctl_add(card, ctl_vol); 6744 rc = snd_ctl_add(card, ctl_vol);
6742 if (rc < 0) { 6745 if (rc < 0) {
6743 printk(TPACPI_ERR 6746 printk(TPACPI_ERR
6744 "Failed to create ALSA volume control\n"); 6747 "Failed to create ALSA volume control: %d\n",
6745 goto err_out; 6748 rc);
6749 goto err_exit;
6746 } 6750 }
6747 data->ctl_vol_id = &ctl_vol->id; 6751 data->ctl_vol_id = &ctl_vol->id;
6748 } 6752 }
@@ -6750,22 +6754,25 @@ static int __init volume_create_alsa_mixer(void)
6750 ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL); 6754 ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
6751 rc = snd_ctl_add(card, ctl_mute); 6755 rc = snd_ctl_add(card, ctl_mute);
6752 if (rc < 0) { 6756 if (rc < 0) {
6753 printk(TPACPI_ERR "Failed to create ALSA mute control\n"); 6757 printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n",
6754 goto err_out; 6758 rc);
6759 goto err_exit;
6755 } 6760 }
6756 data->ctl_mute_id = &ctl_mute->id; 6761 data->ctl_mute_id = &ctl_mute->id;
6757 6762
6758 snd_card_set_dev(card, &tpacpi_pdev->dev); 6763 snd_card_set_dev(card, &tpacpi_pdev->dev);
6759 rc = snd_card_register(card); 6764 rc = snd_card_register(card);
6760
6761err_out:
6762 if (rc < 0) { 6765 if (rc < 0) {
6763 snd_card_free(card); 6766 printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc);
6764 card = NULL; 6767 goto err_exit;
6765 } 6768 }
6766 6769
6767 alsa_card = card; 6770 alsa_card = card;
6768 return rc; 6771 return 0;
6772
6773err_exit:
6774 snd_card_free(card);
6775 return 1;
6769} 6776}
6770 6777
6771#define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */ 6778#define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */
@@ -7016,6 +7023,28 @@ static struct ibm_struct volume_driver_data = {
7016 .shutdown = volume_shutdown, 7023 .shutdown = volume_shutdown,
7017}; 7024};
7018 7025
7026#else /* !CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
7027
7028#define alsa_card NULL
7029
7030static void inline volume_alsa_notify_change(void)
7031{
7032}
7033
7034static int __init volume_init(struct ibm_init_struct *iibm)
7035{
7036 printk(TPACPI_INFO
7037 "volume: disabled as there is no ALSA support in this kernel\n");
7038
7039 return 1;
7040}
7041
7042static struct ibm_struct volume_driver_data = {
7043 .name = "volume",
7044};
7045
7046#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
7047
7019/************************************************************************* 7048/*************************************************************************
7020 * Fan subdriver 7049 * Fan subdriver
7021 */ 7050 */
@@ -8738,6 +8767,7 @@ MODULE_PARM_DESC(hotkey_report_mode,
8738 "used for backwards compatibility with userspace, " 8767 "used for backwards compatibility with userspace, "
8739 "see documentation"); 8768 "see documentation");
8740 8769
8770#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
8741module_param_named(volume_mode, volume_mode, uint, 0444); 8771module_param_named(volume_mode, volume_mode, uint, 0444);
8742MODULE_PARM_DESC(volume_mode, 8772MODULE_PARM_DESC(volume_mode,
8743 "Selects volume control strategy: " 8773 "Selects volume control strategy: "
@@ -8760,6 +8790,7 @@ module_param_named(id, alsa_id, charp, 0444);
8760MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer"); 8790MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer");
8761module_param_named(enable, alsa_enable, bool, 0444); 8791module_param_named(enable, alsa_enable, bool, 0444);
8762MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer"); 8792MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer");
8793#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
8763 8794
8764#define TPACPI_PARAM(feature) \ 8795#define TPACPI_PARAM(feature) \
8765 module_param_call(feature, set_ibm_param, NULL, NULL, 0); \ 8796 module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
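Note on the thinkpad_acpi hunks above: besides wrapping the Console Audio Control code in CONFIG_THINKPAD_ACPI_ALSA_SUPPORT, the default ALSA index changes from SNDRV_DEFAULT_IDX1 to ~((1 << (SNDRV_CARDS - 3)) - 1), which the in-tree comment reads as "last three slots". A small stand-alone C sketch of that mask arithmetic follows; SNDRV_CARDS is assumed to be 32 here (its usual value), and how the ALSA core actually applies the mask at card-creation time is taken from the driver comment, not shown in this diff.

#include <stdio.h>

#define SNDRV_CARDS 32	/* assumption: matches include/sound/core.h of this era */

int main(void)
{
	/* ~((1 << (SNDRV_CARDS - 3)) - 1) clears bits 0..28 and keeps 29..31 */
	unsigned int mask = ~((1u << (SNDRV_CARDS - 3)) - 1u);
	int slot;

	printf("alsa_index mask = 0x%08x\n", mask);	/* 0xe0000000 */
	for (slot = 0; slot < SNDRV_CARDS; slot++)
		if (mask & (1u << slot))
			printf("card slot %d may be used\n", slot);	/* 29, 30, 31 */
	return 0;
}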
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 9f93d6c0f510..b104302fea0a 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -492,8 +492,7 @@ wmi_notify_handler handler, void *data)
492 if (!guid || !handler) 492 if (!guid || !handler)
493 return AE_BAD_PARAMETER; 493 return AE_BAD_PARAMETER;
494 494
495 find_guid(guid, &block); 495 if (!find_guid(guid, &block))
496 if (!block)
497 return AE_NOT_EXIST; 496 return AE_NOT_EXIST;
498 497
499 if (block->handler) 498 if (block->handler)
@@ -521,8 +520,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
521 if (!guid) 520 if (!guid)
522 return AE_BAD_PARAMETER; 521 return AE_BAD_PARAMETER;
523 522
524 find_guid(guid, &block); 523 if (!find_guid(guid, &block))
525 if (!block)
526 return AE_NOT_EXIST; 524 return AE_NOT_EXIST;
527 525
528 if (!block->handler) 526 if (!block->handler)
@@ -716,6 +714,22 @@ static int wmi_class_init(void)
716 return ret; 714 return ret;
717} 715}
718 716
717static bool guid_already_parsed(const char *guid_string)
718{
719 struct guid_block *gblock;
720 struct wmi_block *wblock;
721 struct list_head *p;
722
723 list_for_each(p, &wmi_blocks.list) {
724 wblock = list_entry(p, struct wmi_block, list);
725 gblock = &wblock->gblock;
726
727 if (strncmp(gblock->guid, guid_string, 16) == 0)
728 return true;
729 }
730 return false;
731}
732
719/* 733/*
720 * Parse the _WDG method for the GUID data blocks 734 * Parse the _WDG method for the GUID data blocks
721 */ 735 */
@@ -725,6 +739,7 @@ static __init acpi_status parse_wdg(acpi_handle handle)
725 union acpi_object *obj; 739 union acpi_object *obj;
726 struct guid_block *gblock; 740 struct guid_block *gblock;
727 struct wmi_block *wblock; 741 struct wmi_block *wblock;
742 char guid_string[37];
728 acpi_status status; 743 acpi_status status;
729 u32 i, total; 744 u32 i, total;
730 745
@@ -747,6 +762,19 @@ static __init acpi_status parse_wdg(acpi_handle handle)
747 memcpy(gblock, obj->buffer.pointer, obj->buffer.length); 762 memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
748 763
749 for (i = 0; i < total; i++) { 764 for (i = 0; i < total; i++) {
765 /*
766 Some WMI devices, like those for nVidia hooks, have a
767 duplicate GUID. It's not clear what we should do in this
768 case yet, so for now, we'll just ignore the duplicate.
769 Anyone who wants to add support for that device can come
770 up with a better workaround for the mess then.
771 */
772 if (guid_already_parsed(gblock[i].guid) == true) {
773 wmi_gtoa(gblock[i].guid, guid_string);
774 printk(KERN_INFO PREFIX "Skipping duplicate GUID %s\n",
775 guid_string);
776 continue;
777 }
750 wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL); 778 wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
751 if (!wblock) 779 if (!wblock)
752 return AE_NO_MEMORY; 780 return AE_NO_MEMORY;
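The wmi.c hunks above add guid_already_parsed(), which walks the list of WMI blocks registered so far and skips any _WDG entry whose 16-byte binary GUID has already been seen. A minimal user-space model of that duplicate check follows; it uses a fixed array instead of the kernel's list_for_each() walk and memcmp() instead of the strncmp() call in the hunk, and every name in it is illustrative rather than taken from the driver.

#include <stdio.h>
#include <string.h>

#define GUID_LEN 16

/* toy registry of GUIDs that have already been parsed */
static unsigned char parsed[8][GUID_LEN];
static int nparsed;

static int guid_already_parsed(const unsigned char *guid)
{
	int i;

	for (i = 0; i < nparsed; i++)
		if (memcmp(parsed[i], guid, GUID_LEN) == 0)
			return 1;	/* seen before: caller skips this block */
	return 0;
}

int main(void)
{
	unsigned char a[GUID_LEN] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char b[GUID_LEN] = { 0xde, 0xad, 0xbe, 0xef };

	memcpy(parsed[nparsed++], a, GUID_LEN);
	printf("duplicate? %s\n", guid_already_parsed(b) ? "yes, skip it" : "no");
	return 0;
}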
diff --git a/drivers/power/pmu_battery.c b/drivers/power/pmu_battery.c
index 9346a862f1f2..9c87ad564803 100644
--- a/drivers/power/pmu_battery.c
+++ b/drivers/power/pmu_battery.c
@@ -89,6 +89,8 @@ static int pmu_bat_get_property(struct power_supply *psy,
89 case POWER_SUPPLY_PROP_STATUS: 89 case POWER_SUPPLY_PROP_STATUS:
90 if (pbi->flags & PMU_BATT_CHARGING) 90 if (pbi->flags & PMU_BATT_CHARGING)
91 val->intval = POWER_SUPPLY_STATUS_CHARGING; 91 val->intval = POWER_SUPPLY_STATUS_CHARGING;
92 else if (pmu_power_flags & PMU_PWR_AC_PRESENT)
93 val->intval = POWER_SUPPLY_STATUS_FULL;
92 else 94 else
93 val->intval = POWER_SUPPLY_STATUS_DISCHARGING; 95 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
94 break; 96 break;
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index fa39e759a275..6ea3cb5837c7 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
175 dev_err(&dev->dev, "Do not pass platform_data through " 175 dev_err(&dev->dev, "Do not pass platform_data through "
176 "wm97xx_bat_set_pdata!\n"); 176 "wm97xx_bat_set_pdata!\n");
177 return -EINVAL; 177 return -EINVAL;
178 } else 178 }
179 pdata = wmdata->batt_pdata; 179
180 if (!wmdata) {
181 dev_err(&dev->dev, "No platform data supplied\n");
182 return -EINVAL;
183 }
184
185 pdata = wmdata->batt_pdata;
180 186
181 if (dev->id != -1) 187 if (dev->id != -1)
182 return -EINVAL; 188 return -EINVAL;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 686ef270ecf7..b60a4c9f8f16 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
661static void print_constraints(struct regulator_dev *rdev) 661static void print_constraints(struct regulator_dev *rdev)
662{ 662{
663 struct regulation_constraints *constraints = rdev->constraints; 663 struct regulation_constraints *constraints = rdev->constraints;
664 char buf[80]; 664 char buf[80] = "";
665 int count = 0; 665 int count = 0;
666 int ret; 666 int ret;
667 667
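The one-line regulator core change above matters because print_constraints() only appends to buf for constraints that are actually set; when none are, count stays 0 and the later print would otherwise read an uninitialized stack buffer. A tiny user-space sketch of the failure mode and of why the empty-string initializer is enough:

#include <stdio.h>

static void print_constraints(int have_limits)
{
	char buf[80] = "";	/* without the initializer this could hold stack garbage */
	int count = 0;

	if (have_limits)
		count += sprintf(buf + count, "0 <= V <= 3300mV ");

	/* when count stays 0 the buffer is still a valid empty string */
	printf("regulator: %s\n", buf);
}

int main(void)
{
	print_constraints(1);
	print_constraints(0);	/* prints "regulator: " instead of garbage */
	return 0;
}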
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 76d08c282f9c..4f33a0f4a179 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
183 if (vol_map[val] >= min_vol) 183 if (vol_map[val] >= min_vol)
184 break; 184 break;
185 185
186 if (vol_map[val] > max_vol) 186 if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
187 return -EINVAL; 187 return -EINVAL;
188 188
189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), 189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
272 if (vol_map[val] >= min_vol) 272 if (vol_map[val] >= min_vol)
273 break; 273 break;
274 274
275 if (vol_map[val] > max_vol) 275 if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
276 return -EINVAL; 276 return -EINVAL;
277 277
278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), 278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
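Both lp3971 hunks fix the same out-of-bounds read: the preceding for loop scans vol_map[] for the first entry >= min_vol, and when nothing matches it leaves val one past the last valid index, so the old vol_map[val] > max_vol test read past the table. Putting the index test first relies on the short-circuit evaluation of ||. A compact user-space model (the table contents and MAX_IDX value are made up for illustration):

#include <stdio.h>

#define LDO_VOL_MAX_IDX 4
static const int vol_map[LDO_VOL_MAX_IDX + 1] = { 1200, 1500, 1800, 2500, 3300 };

/* returns the selected index, or -1 as the -EINVAL-style failure */
static int pick_voltage(int min_vol, int max_vol)
{
	int val;

	for (val = 0; val <= LDO_VOL_MAX_IDX; val++)
		if (vol_map[val] >= min_vol)
			break;

	/* check the index before dereferencing: || never evaluates vol_map[val] here */
	if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
		return -1;
	return val;
}

int main(void)
{
	printf("1700..2600 mV -> idx %d\n", pick_voltage(1700, 2600));	/* 2 (1800 mV) */
	printf("3400..3500 mV -> idx %d\n", pick_voltage(3400, 3500));	/* -1, no read past the end */
	return 0;
}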
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1bbff099a546..e7b89e704af6 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1504,7 +1504,8 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
1504 led->isink_init.consumer_supplies = &led->isink_consumer; 1504 led->isink_init.consumer_supplies = &led->isink_consumer;
1505 led->isink_init.constraints.min_uA = 0; 1505 led->isink_init.constraints.min_uA = 0;
1506 led->isink_init.constraints.max_uA = pdata->max_uA; 1506 led->isink_init.constraints.max_uA = pdata->max_uA;
1507 led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT; 1507 led->isink_init.constraints.valid_ops_mask
1508 = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS;
1508 led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; 1509 led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
1509 ret = wm8350_register_regulator(wm8350, isink, &led->isink_init); 1510 ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
1510 if (ret != 0) { 1511 if (ret != 0) {
@@ -1517,6 +1518,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
1517 led->dcdc_init.num_consumer_supplies = 1; 1518 led->dcdc_init.num_consumer_supplies = 1;
1518 led->dcdc_init.consumer_supplies = &led->dcdc_consumer; 1519 led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
1519 led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; 1520 led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
1521 led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
1520 ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init); 1522 ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
1521 if (ret != 0) { 1523 if (ret != 0) {
1522 platform_device_put(pdev); 1524 platform_device_put(pdev);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c8c12325e69b..e9aa814ddd23 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1096,9 +1096,9 @@ static int cmos_pnp_resume(struct pnp_dev *pnp)
1096#define cmos_pnp_resume NULL 1096#define cmos_pnp_resume NULL
1097#endif 1097#endif
1098 1098
1099static void cmos_pnp_shutdown(struct device *pdev) 1099static void cmos_pnp_shutdown(struct pnp_dev *pnp)
1100{ 1100{
1101 if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev)) 1101 if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
1102 return; 1102 return;
1103 1103
1104 cmos_do_shutdown(); 1104 cmos_do_shutdown();
@@ -1117,15 +1117,12 @@ static struct pnp_driver cmos_pnp_driver = {
1117 .id_table = rtc_ids, 1117 .id_table = rtc_ids,
1118 .probe = cmos_pnp_probe, 1118 .probe = cmos_pnp_probe,
1119 .remove = __exit_p(cmos_pnp_remove), 1119 .remove = __exit_p(cmos_pnp_remove),
1120 .shutdown = cmos_pnp_shutdown,
1120 1121
1121 /* flag ensures resume() gets called, and stops syslog spam */ 1122 /* flag ensures resume() gets called, and stops syslog spam */
1122 .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, 1123 .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
1123 .suspend = cmos_pnp_suspend, 1124 .suspend = cmos_pnp_suspend,
1124 .resume = cmos_pnp_resume, 1125 .resume = cmos_pnp_resume,
1125 .driver = {
1126 .name = (char *)driver_name,
1127 .shutdown = cmos_pnp_shutdown,
1128 }
1129}; 1126};
1130 1127
1131#endif /* CONFIG_PNP */ 1128#endif /* CONFIG_PNP */
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 3a7be11cc6b9..812c66755083 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -376,20 +376,22 @@ static int __devinit fm3130_probe(struct i2c_client *client,
376 } 376 }
377 377
378 /* Disabling calibration mode */ 378 /* Disabling calibration mode */
379 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) 379 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) {
380 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, 380 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
381 fm3130->regs[FM3130_RTC_CONTROL] & 381 fm3130->regs[FM3130_RTC_CONTROL] &
382 ~(FM3130_RTC_CONTROL_BIT_CAL)); 382 ~(FM3130_RTC_CONTROL_BIT_CAL));
383 dev_warn(&client->dev, "Disabling calibration mode!\n"); 383 dev_warn(&client->dev, "Disabling calibration mode!\n");
384 }
384 385
385 /* Disabling read and write modes */ 386 /* Disabling read and write modes */
386 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE || 387 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE ||
387 fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) 388 fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) {
388 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, 389 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
389 fm3130->regs[FM3130_RTC_CONTROL] & 390 fm3130->regs[FM3130_RTC_CONTROL] &
390 ~(FM3130_RTC_CONTROL_BIT_READ | 391 ~(FM3130_RTC_CONTROL_BIT_READ |
391 FM3130_RTC_CONTROL_BIT_WRITE)); 392 FM3130_RTC_CONTROL_BIT_WRITE));
392 dev_warn(&client->dev, "Disabling READ or WRITE mode!\n"); 393 dev_warn(&client->dev, "Disabling READ or WRITE mode!\n");
394 }
393 395
394 /* oscillator off? turn it on, so clock can tick. */ 396 /* oscillator off? turn it on, so clock can tick. */
395 if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN) 397 if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN)
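The two fm3130 hunks above fix a classic missing-braces bug: the dev_warn() calls were indented under the if but, with no braces, executed unconditionally, so every probe warned about disabling calibration or READ/WRITE mode even when nothing was written. A minimal illustration of the difference:

#include <stdio.h>

int main(void)
{
	int need_fixup = 0;

	if (need_fixup)
		printf("writing control register\n");
		printf("warning: disabling calibration mode!\n");	/* old code: always ran */

	if (need_fixup) {
		printf("writing control register\n");
		printf("warning: disabling calibration mode!\n");	/* new code: properly guarded */
	}
	return 0;
}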
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fdb2e7c14506..5905936c7c60 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1004,8 +1004,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
1004 if (device == NULL || 1004 if (device == NULL ||
1005 device != dasd_device_from_cdev_locked(cdev) || 1005 device != dasd_device_from_cdev_locked(cdev) ||
1006 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1006 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1007 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " 1007 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1008 "bus_id %s", dev_name(&cdev->dev)); 1008 "invalid device in request");
1009 return; 1009 return;
1010 } 1010 }
1011 1011
@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1078 device = (struct dasd_device *) cqr->startdev; 1078 device = (struct dasd_device *) cqr->startdev;
1079 if (!device || 1079 if (!device ||
1080 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1080 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1081 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " 1081 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1082 "bus_id %s", dev_name(&cdev->dev)); 1082 "invalid device in request");
1083 return; 1083 return;
1084 } 1084 }
1085 1085
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5819dc02a143..1cca21aafaba 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -23,6 +23,7 @@
23#include <asm/debug.h> 23#include <asm/debug.h>
24#include <asm/idals.h> 24#include <asm/idals.h>
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/compat.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28#include <asm/cio.h> 29#include <asm/cio.h>
@@ -2844,13 +2845,16 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2844 rc = -EFAULT; 2845 rc = -EFAULT;
2845 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 2846 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2846 goto out; 2847 goto out;
2847#ifndef CONFIG_64BIT 2848 if (is_compat_task() || sizeof(long) == 4) {
2848 /* Make sure pointers are sane even on 31 bit. */ 2849 /* Make sure pointers are sane even on 31 bit. */
2849 if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
2850 rc = -EINVAL; 2850 rc = -EINVAL;
2851 goto out; 2851 if ((usrparm.psf_data >> 32) != 0)
2852 goto out;
2853 if ((usrparm.rssd_result >> 32) != 0)
2854 goto out;
2855 usrparm.psf_data &= 0x7fffffffULL;
2856 usrparm.rssd_result &= 0x7fffffffULL;
2852 } 2857 }
2853#endif
2854 /* alloc I/O data area */ 2858 /* alloc I/O data area */
2855 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 2859 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2856 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 2860 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
@@ -3029,7 +3033,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3029 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3033 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3030 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", 3034 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3031 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3035 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3032 scsw_cc(&irb->scsw), req->intrc); 3036 scsw_cc(&irb->scsw), req ? req->intrc : 0);
3033 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3037 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3034 " device %s: Failing CCW: %p\n", 3038 " device %s: Failing CCW: %p\n",
3035 dev_name(&device->cdev->dev), 3039 dev_name(&device->cdev->dev),
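The dasd_symm_io() rework above replaces the compile-time CONFIG_64BIT branch with a runtime check: for 31-bit callers it rejects user pointers with any bit above 31 set and then masks off bit 31, since s/390 compat-mode addresses only use the low 31 bits. A user-space model of that sanitizing step; is_compat() below stands in for is_compat_task(), and the mask mirrors the hunk:

#include <stdio.h>
#include <stdint.h>

/* stand-in for is_compat_task(): pretend the caller is a 31-bit task */
static int is_compat(void) { return 1; }

static int sanitize_ptr(uint64_t *uaddr)
{
	if (is_compat() || sizeof(long) == 4) {
		if (*uaddr >> 32)
			return -1;		/* -EINVAL: not a 31-bit address */
		*uaddr &= 0x7fffffffULL;	/* drop the 31-bit-mode top bit */
	}
	return 0;
}

int main(void)
{
	uint64_t ok  = 0x80001000ULL;	/* bit 31 set: accepted and masked */
	uint64_t bad = 0x100000000ULL;	/* bit 32 set: rejected */
	int rc;

	rc = sanitize_ptr(&ok);
	printf("ok:  rc=%d addr=0x%llx\n", rc, (unsigned long long)ok);
	rc = sanitize_ptr(&bad);
	printf("bad: rc=%d\n", rc);
	return 0;
}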
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 478bcdb90b6f..7039d9cf0fb4 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -17,7 +17,7 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkpg.h> 18#include <linux/blkpg.h>
19#include <linux/smp_lock.h> 19#include <linux/smp_lock.h>
20 20#include <asm/compat.h>
21#include <asm/ccwdev.h> 21#include <asm/ccwdev.h>
22#include <asm/cmb.h> 22#include <asm/cmb.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
260 struct ccw_dev_id dev_id; 260 struct ccw_dev_id dev_id;
261 261
262 base = block->base; 262 base = block->base;
263 if (!base->discipline->fill_info) 263 if (!base->discipline || !base->discipline->fill_info)
264 return -EINVAL; 264 return -EINVAL;
265 265
266 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); 266 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
@@ -303,10 +303,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
303 dasd_info->features |= 303 dasd_info->features |=
304 ((base->features & DASD_FEATURE_READONLY) != 0); 304 ((base->features & DASD_FEATURE_READONLY) != 0);
305 305
306 if (base->discipline) 306 memcpy(dasd_info->type, base->discipline->name, 4);
307 memcpy(dasd_info->type, base->discipline->name, 4);
308 else
309 memcpy(dasd_info->type, "none", 4);
310 307
311 if (block->request_queue->request_fn) { 308 if (block->request_queue->request_fn) {
312 struct list_head *l; 309 struct list_head *l;
@@ -358,9 +355,8 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
358} 355}
359 356
360static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, 357static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
361 unsigned long arg) 358 struct cmbdata __user *argp)
362{ 359{
363 struct cmbdata __user *argp = (void __user *) arg;
364 size_t size = _IOC_SIZE(cmd); 360 size_t size = _IOC_SIZE(cmd);
365 struct cmbdata data; 361 struct cmbdata data;
366 int ret; 362 int ret;
@@ -376,7 +372,12 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
376 unsigned int cmd, unsigned long arg) 372 unsigned int cmd, unsigned long arg)
377{ 373{
378 struct dasd_block *block = bdev->bd_disk->private_data; 374 struct dasd_block *block = bdev->bd_disk->private_data;
379 void __user *argp = (void __user *)arg; 375 void __user *argp;
376
377 if (is_compat_task())
378 argp = compat_ptr(arg);
379 else
380 argp = (void __user *)arg;
380 381
381 if (!block) 382 if (!block)
382 return -ENODEV; 383 return -ENODEV;
@@ -414,7 +415,7 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
414 case BIODASDCMFDISABLE: 415 case BIODASDCMFDISABLE:
415 return disable_cmf(block->base->cdev); 416 return disable_cmf(block->base->cdev);
416 case BIODASDREADALLCMB: 417 case BIODASDREADALLCMB:
417 return dasd_ioctl_readall_cmb(block, cmd, arg); 418 return dasd_ioctl_readall_cmb(block, cmd, argp);
418 default: 419 default:
419 /* if the discipline has an ioctl method try it. */ 420 /* if the discipline has an ioctl method try it. */
420 if (block->base->discipline->ioctl) { 421 if (block->base->discipline->ioctl) {
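This dasd_ioctl hunk, and the fs3270, vmcp and chsc_sch hunks further down, all repeat one pattern: a handler that is wired up as both unlocked_ioctl and compat_ioctl must convert the raw argument with compat_ptr() when the caller is a 31-bit task and cast it directly otherwise. A sketch of that pattern as a helper; get_user_argp() is a name invented here for illustration, not something these patches add:

#include <asm/compat.h>

/* sketch only: mirrors the per-handler conversion added in the hunks */
static void __user *get_user_argp(unsigned long arg)
{
	if (is_compat_task())
		return compat_ptr(arg);	/* zero-extend a 31-bit user pointer */
	return (void __user *)arg;
}

The converted pointer is then handed to put_user()/copy_to_user()/copy_from_user() exactly as before; only the conversion site changes, which is why each hunk is mechanical.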
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 6315fbd8e68b..71f95f54866f 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -72,7 +72,7 @@ dasd_devices_show(struct seq_file *m, void *v)
72 /* Print device number. */ 72 /* Print device number. */
73 seq_printf(m, "%s", dev_name(&device->cdev->dev)); 73 seq_printf(m, "%s", dev_name(&device->cdev->dev));
74 /* Print discipline string. */ 74 /* Print discipline string. */
75 if (device != NULL && device->discipline != NULL) 75 if (device->discipline != NULL)
76 seq_printf(m, "(%s)", device->discipline->name); 76 seq_printf(m, "(%s)", device->discipline->name);
77 else 77 else
78 seq_printf(m, "(none)"); 78 seq_printf(m, "(none)");
@@ -92,10 +92,7 @@ dasd_devices_show(struct seq_file *m, void *v)
92 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; 92 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
93 seq_printf(m, "%4s: ", substr); 93 seq_printf(m, "%4s: ", substr);
94 /* Print device status information. */ 94 /* Print device status information. */
95 switch ((device != NULL) ? device->state : -1) { 95 switch (device->state) {
96 case -1:
97 seq_printf(m, "unknown");
98 break;
99 case DASD_STATE_NEW: 96 case DASD_STATE_NEW:
100 seq_printf(m, "new"); 97 seq_printf(m, "new");
101 break; 98 break;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9d61683b5633..59ec073724bf 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1037,22 +1037,6 @@ static void tty3215_flush_buffer(struct tty_struct *tty)
1037} 1037}
1038 1038
1039/* 1039/*
1040 * Currently we don't have any io controls for 3215 ttys
1041 */
1042static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
1043 unsigned int cmd, unsigned long arg)
1044{
1045 if (tty->flags & (1 << TTY_IO_ERROR))
1046 return -EIO;
1047
1048 switch (cmd) {
1049 default:
1050 return -ENOIOCTLCMD;
1051 }
1052 return 0;
1053}
1054
1055/*
1056 * Disable reading from a 3215 tty 1040 * Disable reading from a 3215 tty
1057 */ 1041 */
1058static void tty3215_throttle(struct tty_struct * tty) 1042static void tty3215_throttle(struct tty_struct * tty)
@@ -1117,7 +1101,6 @@ static const struct tty_operations tty3215_ops = {
1117 .write_room = tty3215_write_room, 1101 .write_room = tty3215_write_room,
1118 .chars_in_buffer = tty3215_chars_in_buffer, 1102 .chars_in_buffer = tty3215_chars_in_buffer,
1119 .flush_buffer = tty3215_flush_buffer, 1103 .flush_buffer = tty3215_flush_buffer,
1120 .ioctl = tty3215_ioctl,
1121 .throttle = tty3215_throttle, 1104 .throttle = tty3215_throttle,
1122 .unthrottle = tty3215_unthrottle, 1105 .unthrottle = tty3215_unthrottle,
1123 .stop = tty3215_stop, 1106 .stop = tty3215_stop,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 247b2b934728..31c59b0d6df0 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
17 17
18#include <asm/compat.h>
18#include <asm/ccwdev.h> 19#include <asm/ccwdev.h>
19#include <asm/cio.h> 20#include <asm/cio.h>
20#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
@@ -322,6 +323,7 @@ fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *o
322static long 323static long
323fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 324fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
324{ 325{
326 char __user *argp;
325 struct fs3270 *fp; 327 struct fs3270 *fp;
326 struct raw3270_iocb iocb; 328 struct raw3270_iocb iocb;
327 int rc; 329 int rc;
@@ -329,6 +331,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
329 fp = filp->private_data; 331 fp = filp->private_data;
330 if (!fp) 332 if (!fp)
331 return -ENODEV; 333 return -ENODEV;
334 if (is_compat_task())
335 argp = compat_ptr(arg);
336 else
337 argp = (char __user *)arg;
332 rc = 0; 338 rc = 0;
333 mutex_lock(&fs3270_mutex); 339 mutex_lock(&fs3270_mutex);
334 switch (cmd) { 340 switch (cmd) {
@@ -339,10 +345,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
339 fp->write_command = arg; 345 fp->write_command = arg;
340 break; 346 break;
341 case TUBGETI: 347 case TUBGETI:
342 rc = put_user(fp->read_command, (char __user *) arg); 348 rc = put_user(fp->read_command, argp);
343 break; 349 break;
344 case TUBGETO: 350 case TUBGETO:
345 rc = put_user(fp->write_command,(char __user *) arg); 351 rc = put_user(fp->write_command, argp);
346 break; 352 break;
347 case TUBGETMOD: 353 case TUBGETMOD:
348 iocb.model = fp->view.model; 354 iocb.model = fp->view.model;
@@ -351,8 +357,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
351 iocb.pf_cnt = 24; 357 iocb.pf_cnt = 24;
352 iocb.re_cnt = 20; 358 iocb.re_cnt = 20;
353 iocb.map = 0; 359 iocb.map = 0;
354 if (copy_to_user((char __user *) arg, &iocb, 360 if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
355 sizeof(struct raw3270_iocb)))
356 rc = -EFAULT; 361 rc = -EFAULT;
357 break; 362 break;
358 } 363 }
@@ -511,8 +516,8 @@ static const struct file_operations fs3270_fops = {
511 .write = fs3270_write, /* write */ 516 .write = fs3270_write, /* write */
512 .unlocked_ioctl = fs3270_ioctl, /* ioctl */ 517 .unlocked_ioctl = fs3270_ioctl, /* ioctl */
513 .compat_ioctl = fs3270_ioctl, /* ioctl */ 518 .compat_ioctl = fs3270_ioctl, /* ioctl */
514 .open = fs3270_open, /* open */ 519 .open = fs3270_open, /* open */
515 .release = fs3270_close, /* release */ 520 .release = fs3270_close, /* release */
516}; 521};
517 522
518/* 523/*
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index b9d2a007e93b..3796ffdb8479 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -495,6 +495,10 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
495 if (tty->driver_data == NULL) 495 if (tty->driver_data == NULL)
496 return -ENOMEM; 496 return -ENOMEM;
497 tty->low_latency = 0; 497 tty->low_latency = 0;
498 if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
499 tty->winsize.ws_row = 24;
500 tty->winsize.ws_col = 80;
501 }
498 } 502 }
499 return 0; 503 return 0;
500} 504}
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 96816149368a..8d3d720737da 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -46,8 +46,6 @@
46 */ 46 */
47static int tapeblock_open(struct block_device *, fmode_t); 47static int tapeblock_open(struct block_device *, fmode_t);
48static int tapeblock_release(struct gendisk *, fmode_t); 48static int tapeblock_release(struct gendisk *, fmode_t);
49static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
50 unsigned long);
51static int tapeblock_medium_changed(struct gendisk *); 49static int tapeblock_medium_changed(struct gendisk *);
52static int tapeblock_revalidate_disk(struct gendisk *); 50static int tapeblock_revalidate_disk(struct gendisk *);
53 51
@@ -55,7 +53,6 @@ static const struct block_device_operations tapeblock_fops = {
55 .owner = THIS_MODULE, 53 .owner = THIS_MODULE,
56 .open = tapeblock_open, 54 .open = tapeblock_open,
57 .release = tapeblock_release, 55 .release = tapeblock_release,
58 .ioctl = tapeblock_ioctl,
59 .media_changed = tapeblock_medium_changed, 56 .media_changed = tapeblock_medium_changed,
60 .revalidate_disk = tapeblock_revalidate_disk, 57 .revalidate_disk = tapeblock_revalidate_disk,
61}; 58};
@@ -416,42 +413,6 @@ tapeblock_release(struct gendisk *disk, fmode_t mode)
416} 413}
417 414
418/* 415/*
419 * Support of some generic block device IOCTLs.
420 */
421static int
422tapeblock_ioctl(
423 struct block_device * bdev,
424 fmode_t mode,
425 unsigned int command,
426 unsigned long arg
427) {
428 int rc;
429 int minor;
430 struct gendisk *disk = bdev->bd_disk;
431 struct tape_device *device;
432
433 rc = 0;
434 BUG_ON(!disk);
435 device = disk->private_data;
436 BUG_ON(!device);
437 minor = MINOR(bdev->bd_dev);
438
439 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
440 DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
441
442 switch (command) {
443 /* Refuse some IOCTL calls without complaining (mount). */
444 case 0x5310: /* CDROMMULTISESSION */
445 rc = -EINVAL;
446 break;
447 default:
448 rc = -EINVAL;
449 }
450
451 return rc;
452}
453
454/*
455 * Initialize block device frontend. 416 * Initialize block device frontend.
456 */ 417 */
457int 418int
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 2125ec7d95f0..539045acaad4 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -18,6 +18,7 @@
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19#include <linux/mtio.h> 19#include <linux/mtio.h>
20#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
21#include <linux/compat.h>
21 22
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23 24
@@ -37,8 +38,9 @@ static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t
37static int tapechar_open(struct inode *,struct file *); 38static int tapechar_open(struct inode *,struct file *);
38static int tapechar_release(struct inode *,struct file *); 39static int tapechar_release(struct inode *,struct file *);
39static long tapechar_ioctl(struct file *, unsigned int, unsigned long); 40static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
40static long tapechar_compat_ioctl(struct file *, unsigned int, 41#ifdef CONFIG_COMPAT
41 unsigned long); 42static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
43#endif
42 44
43static const struct file_operations tape_fops = 45static const struct file_operations tape_fops =
44{ 46{
@@ -46,7 +48,9 @@ static const struct file_operations tape_fops =
46 .read = tapechar_read, 48 .read = tapechar_read,
47 .write = tapechar_write, 49 .write = tapechar_write,
48 .unlocked_ioctl = tapechar_ioctl, 50 .unlocked_ioctl = tapechar_ioctl,
51#ifdef CONFIG_COMPAT
49 .compat_ioctl = tapechar_compat_ioctl, 52 .compat_ioctl = tapechar_compat_ioctl,
53#endif
50 .open = tapechar_open, 54 .open = tapechar_open,
51 .release = tapechar_release, 55 .release = tapechar_release,
52}; 56};
@@ -457,15 +461,22 @@ tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
457 return rc; 461 return rc;
458} 462}
459 463
464#ifdef CONFIG_COMPAT
460static long 465static long
461tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) 466tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
462{ 467{
463 struct tape_device *device = filp->private_data; 468 struct tape_device *device = filp->private_data;
464 int rval = -ENOIOCTLCMD; 469 int rval = -ENOIOCTLCMD;
470 unsigned long argp;
465 471
472 /* The 'arg' argument of any ioctl function may only be used for
473 * pointers because of the compat pointer conversion.
474 * Consider this when adding new ioctls.
475 */
476 argp = (unsigned long) compat_ptr(data);
466 if (device->discipline->ioctl_fn) { 477 if (device->discipline->ioctl_fn) {
467 mutex_lock(&device->mutex); 478 mutex_lock(&device->mutex);
468 rval = device->discipline->ioctl_fn(device, no, data); 479 rval = device->discipline->ioctl_fn(device, no, argp);
469 mutex_unlock(&device->mutex); 480 mutex_unlock(&device->mutex);
470 if (rval == -EINVAL) 481 if (rval == -EINVAL)
471 rval = -ENOIOCTLCMD; 482 rval = -ENOIOCTLCMD;
@@ -473,6 +484,7 @@ tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
473 484
474 return rval; 485 return rval;
475} 486}
487#endif /* CONFIG_COMPAT */
476 488
477/* 489/*
478 * Initialize character device frontend. 490 * Initialize character device frontend.
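The tape_char change compiles tapechar_compat_ioctl() only under CONFIG_COMPAT, so builds without compat tasks do not carry a dead handler, and converts the argument with compat_ptr() before calling into the discipline. A minimal fops sketch of that conditional wiring; the example_* names are placeholders, not taken from the driver:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/compat.h>

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

#ifdef CONFIG_COMPAT
static long example_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	/* as the comment in the hunk notes, only pointer arguments survive compat_ptr() */
	return example_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= example_compat_ioctl,
#endif
};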
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index a6087cec55b4..921dcda77676 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/miscdevice.h> 20#include <linux/miscdevice.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <asm/compat.h>
22#include <asm/cpcmd.h> 23#include <asm/cpcmd.h>
23#include <asm/debug.h> 24#include <asm/debug.h>
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
@@ -139,21 +140,26 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
139static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 140static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
140{ 141{
141 struct vmcp_session *session; 142 struct vmcp_session *session;
143 int __user *argp;
142 int temp; 144 int temp;
143 145
144 session = (struct vmcp_session *)file->private_data; 146 session = (struct vmcp_session *)file->private_data;
147 if (is_compat_task())
148 argp = compat_ptr(arg);
149 else
150 argp = (int __user *)arg;
145 if (mutex_lock_interruptible(&session->mutex)) 151 if (mutex_lock_interruptible(&session->mutex))
146 return -ERESTARTSYS; 152 return -ERESTARTSYS;
147 switch (cmd) { 153 switch (cmd) {
148 case VMCP_GETCODE: 154 case VMCP_GETCODE:
149 temp = session->resp_code; 155 temp = session->resp_code;
150 mutex_unlock(&session->mutex); 156 mutex_unlock(&session->mutex);
151 return put_user(temp, (int __user *)arg); 157 return put_user(temp, argp);
152 case VMCP_SETBUF: 158 case VMCP_SETBUF:
153 free_pages((unsigned long)session->response, 159 free_pages((unsigned long)session->response,
154 get_order(session->bufsize)); 160 get_order(session->bufsize));
155 session->response=NULL; 161 session->response=NULL;
156 temp = get_user(session->bufsize, (int __user *)arg); 162 temp = get_user(session->bufsize, argp);
157 if (get_order(session->bufsize) > 8) { 163 if (get_order(session->bufsize) > 8) {
158 session->bufsize = PAGE_SIZE; 164 session->bufsize = PAGE_SIZE;
159 temp = -EINVAL; 165 temp = -EINVAL;
@@ -163,7 +169,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
163 case VMCP_GETSIZE: 169 case VMCP_GETSIZE:
164 temp = session->resp_size; 170 temp = session->resp_size;
165 mutex_unlock(&session->mutex); 171 mutex_unlock(&session->mutex);
166 return put_user(temp, (int __user *)arg); 172 return put_user(temp, argp);
167 default: 173 default:
168 mutex_unlock(&session->mutex); 174 mutex_unlock(&session->mutex);
169 return -ENOIOCTLCMD; 175 return -ENOIOCTLCMD;
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index d033414f7599..e1b700a19648 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -10,5 +10,5 @@ obj-y += ccw_device.o cmf.o
10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o 10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
11obj-$(CONFIG_CCWGROUP) += ccwgroup.o 11obj-$(CONFIG_CCWGROUP) += ccwgroup.o
12 12
13qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o 13qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
14obj-$(CONFIG_QDIO) += qdio.o 14obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index cc5144b6f9d9..c84ac9443079 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -12,6 +12,7 @@
12#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/miscdevice.h> 13#include <linux/miscdevice.h>
14 14
15#include <asm/compat.h>
15#include <asm/cio.h> 16#include <asm/cio.h>
16#include <asm/chsc.h> 17#include <asm/chsc.h>
17#include <asm/isc.h> 18#include <asm/isc.h>
@@ -770,24 +771,30 @@ out_free:
770static long chsc_ioctl(struct file *filp, unsigned int cmd, 771static long chsc_ioctl(struct file *filp, unsigned int cmd,
771 unsigned long arg) 772 unsigned long arg)
772{ 773{
774 void __user *argp;
775
773 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); 776 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
777 if (is_compat_task())
778 argp = compat_ptr(arg);
779 else
780 argp = (void __user *)arg;
774 switch (cmd) { 781 switch (cmd) {
775 case CHSC_START: 782 case CHSC_START:
776 return chsc_ioctl_start((void __user *)arg); 783 return chsc_ioctl_start(argp);
777 case CHSC_INFO_CHANNEL_PATH: 784 case CHSC_INFO_CHANNEL_PATH:
778 return chsc_ioctl_info_channel_path((void __user *)arg); 785 return chsc_ioctl_info_channel_path(argp);
779 case CHSC_INFO_CU: 786 case CHSC_INFO_CU:
780 return chsc_ioctl_info_cu((void __user *)arg); 787 return chsc_ioctl_info_cu(argp);
781 case CHSC_INFO_SCH_CU: 788 case CHSC_INFO_SCH_CU:
782 return chsc_ioctl_info_sch_cu((void __user *)arg); 789 return chsc_ioctl_info_sch_cu(argp);
783 case CHSC_INFO_CI: 790 case CHSC_INFO_CI:
784 return chsc_ioctl_conf_info((void __user *)arg); 791 return chsc_ioctl_conf_info(argp);
785 case CHSC_INFO_CCL: 792 case CHSC_INFO_CCL:
786 return chsc_ioctl_conf_comp_list((void __user *)arg); 793 return chsc_ioctl_conf_comp_list(argp);
787 case CHSC_INFO_CPD: 794 case CHSC_INFO_CPD:
788 return chsc_ioctl_chpd((void __user *)arg); 795 return chsc_ioctl_chpd(argp);
789 case CHSC_INFO_DCAL: 796 case CHSC_INFO_DCAL:
790 return chsc_ioctl_dcal((void __user *)arg); 797 return chsc_ioctl_dcal(argp);
791 default: /* unknown ioctl number */ 798 default: /* unknown ioctl number */
792 return -ENOIOCTLCMD; 799 return -ENOIOCTLCMD;
793 } 800 }
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ff7748a9199d..44f2f6a97f33 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -182,6 +182,34 @@ struct scssc_area {
182 u32:32; 182 u32:32;
183} __attribute__ ((packed)); 183} __attribute__ ((packed));
184 184
185struct qdio_dev_perf_stat {
186 unsigned int adapter_int;
187 unsigned int qdio_int;
188 unsigned int pci_request_int;
189
190 unsigned int tasklet_inbound;
191 unsigned int tasklet_inbound_resched;
192 unsigned int tasklet_inbound_resched2;
193 unsigned int tasklet_outbound;
194
195 unsigned int siga_read;
196 unsigned int siga_write;
197 unsigned int siga_sync;
198
199 unsigned int inbound_call;
200 unsigned int inbound_handler;
201 unsigned int stop_polling;
202 unsigned int inbound_queue_full;
203 unsigned int outbound_call;
204 unsigned int outbound_handler;
205 unsigned int fast_requeue;
206 unsigned int target_full;
207 unsigned int eqbs;
208 unsigned int eqbs_partial;
209 unsigned int sqbs;
210 unsigned int sqbs_partial;
211};
212
185struct qdio_input_q { 213struct qdio_input_q {
186 /* input buffer acknowledgement flag */ 214 /* input buffer acknowledgement flag */
187 int polling; 215 int polling;
@@ -269,6 +297,7 @@ struct qdio_irq {
269 u32 *dsci; /* address of device state change indicator */ 297 u32 *dsci; /* address of device state change indicator */
270 struct ccw_device *cdev; 298 struct ccw_device *cdev;
271 struct dentry *debugfs_dev; 299 struct dentry *debugfs_dev;
300 struct dentry *debugfs_perf;
272 301
273 unsigned long int_parm; 302 unsigned long int_parm;
274 struct subchannel_id schid; 303 struct subchannel_id schid;
@@ -286,9 +315,10 @@ struct qdio_irq {
286 struct ciw aqueue; 315 struct ciw aqueue;
287 316
288 struct qdio_ssqd_desc ssqd_desc; 317 struct qdio_ssqd_desc ssqd_desc;
289
290 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); 318 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
291 319
320 struct qdio_dev_perf_stat perf_stat;
321 int perf_stat_enabled;
292 /* 322 /*
293 * Warning: Leave these members together at the end so they won't be 323 * Warning: Leave these members together at the end so they won't be
294 * cleared in qdio_setup_irq. 324 * cleared in qdio_setup_irq.
@@ -311,6 +341,10 @@ struct qdio_irq {
311 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ 341 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
312 css_general_characteristics.aif_osa) 342 css_general_characteristics.aif_osa)
313 343
344#define qperf(qdev,attr) qdev->perf_stat.attr
345#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \
346 q->irq_ptr->perf_stat.attr++
347
314/* the highest iqdio queue is used for multicast */ 348/* the highest iqdio queue is used for multicast */
315static inline int multicast_outbound(struct qdio_q *q) 349static inline int multicast_outbound(struct qdio_q *q)
316{ 350{
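qperf_inc() introduced above is an enable-gated counter: it costs a single flag test while perf_stat_enabled is off and a plain increment once it is on, replacing the global qdio_perf counters with per-device ones. The header's macro expands to a bare if statement; the stand-alone model below wraps it in do { } while (0) (this sketch's addition, not part of the header) so the macro also nests safely under an if/else at a call site:

#include <stdio.h>

struct perf_stat { unsigned int siga_read; unsigned int siga_write; };
struct irq   { int perf_stat_enabled; struct perf_stat perf_stat; };
struct queue { struct irq *irq_ptr; };

/* enable-gated counter; do { } while (0) keeps it safe inside if/else */
#define qperf_inc(q, attr)					\
	do {							\
		if ((q)->irq_ptr->perf_stat_enabled)		\
			(q)->irq_ptr->perf_stat.attr++;		\
	} while (0)

int main(void)
{
	struct irq irq = { .perf_stat_enabled = 0 };
	struct queue q = { .irq_ptr = &irq };

	qperf_inc(&q, siga_read);	/* ignored: statistics disabled */
	irq.perf_stat_enabled = 1;
	qperf_inc(&q, siga_read);	/* counted */
	printf("siga_read = %u\n", irq.perf_stat.siga_read);	/* prints 1 */
	return 0;
}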
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 76769978285f..f49761ff9a00 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -55,13 +55,11 @@ static int qstat_show(struct seq_file *m, void *v)
55 if (!q) 55 if (!q)
56 return 0; 56 return 0;
57 57
58 seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); 58 seq_printf(m, "DSCI: %d nr_used: %d\n",
59 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); 59 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
60 seq_printf(m, "ftc: %d\n", q->first_to_check); 60 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
61 seq_printf(m, "last_move: %d\n", q->last_move); 61 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
62 seq_printf(m, "polling: %d\n", q->u.in.polling); 62 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
63 seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
64 seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
65 seq_printf(m, "slsb buffer states:\n"); 63 seq_printf(m, "slsb buffer states:\n");
66 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 64 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
67 65
@@ -110,7 +108,6 @@ static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
110 108
111 if (!q) 109 if (!q)
112 return 0; 110 return 0;
113
114 if (q->is_input_q) 111 if (q->is_input_q)
115 xchg(q->irq_ptr->dsci, 1); 112 xchg(q->irq_ptr->dsci, 1);
116 local_bh_disable(); 113 local_bh_disable();
@@ -134,6 +131,98 @@ static const struct file_operations debugfs_fops = {
134 .release = single_release, 131 .release = single_release,
135}; 132};
136 133
134static char *qperf_names[] = {
135 "Assumed adapter interrupts",
136 "QDIO interrupts",
137 "Requested PCIs",
138 "Inbound tasklet runs",
139 "Inbound tasklet resched",
140 "Inbound tasklet resched2",
141 "Outbound tasklet runs",
142 "SIGA read",
143 "SIGA write",
144 "SIGA sync",
145 "Inbound calls",
146 "Inbound handler",
147 "Inbound stop_polling",
148 "Inbound queue full",
149 "Outbound calls",
150 "Outbound handler",
151 "Outbound fast_requeue",
152 "Outbound target_full",
153 "QEBSM eqbs",
154 "QEBSM eqbs partial",
155 "QEBSM sqbs",
156 "QEBSM sqbs partial"
157};
158
159static int qperf_show(struct seq_file *m, void *v)
160{
161 struct qdio_irq *irq_ptr = m->private;
162 unsigned int *stat;
163 int i;
164
165 if (!irq_ptr)
166 return 0;
167 if (!irq_ptr->perf_stat_enabled) {
168 seq_printf(m, "disabled\n");
169 return 0;
170 }
171 stat = (unsigned int *)&irq_ptr->perf_stat;
172
173 for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
174 seq_printf(m, "%26s:\t%u\n",
175 qperf_names[i], *(stat + i));
176 return 0;
177}
178
179static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
180 size_t count, loff_t *off)
181{
182 struct seq_file *seq = file->private_data;
183 struct qdio_irq *irq_ptr = seq->private;
184 unsigned long val;
185 char buf[8];
186 int ret;
187
188 if (!irq_ptr)
189 return 0;
190 if (count >= sizeof(buf))
191 return -EINVAL;
192 if (copy_from_user(&buf, ubuf, count))
193 return -EFAULT;
194 buf[count] = 0;
195
196 ret = strict_strtoul(buf, 10, &val);
197 if (ret < 0)
198 return ret;
199
200 switch (val) {
201 case 0:
202 irq_ptr->perf_stat_enabled = 0;
203 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
204 break;
205 case 1:
206 irq_ptr->perf_stat_enabled = 1;
207 break;
208 }
209 return count;
210}
211
212static int qperf_seq_open(struct inode *inode, struct file *filp)
213{
214 return single_open(filp, qperf_show,
215 filp->f_path.dentry->d_inode->i_private);
216}
217
218static struct file_operations debugfs_perf_fops = {
219 .owner = THIS_MODULE,
220 .open = qperf_seq_open,
221 .read = seq_read,
222 .write = qperf_seq_write,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
137static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) 226static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
138{ 227{
139 char name[QDIO_DEBUGFS_NAME_LEN]; 228 char name[QDIO_DEBUGFS_NAME_LEN];
@@ -156,6 +245,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
156 debugfs_root); 245 debugfs_root);
157 if (IS_ERR(irq_ptr->debugfs_dev)) 246 if (IS_ERR(irq_ptr->debugfs_dev))
158 irq_ptr->debugfs_dev = NULL; 247 irq_ptr->debugfs_dev = NULL;
248
249 irq_ptr->debugfs_perf = debugfs_create_file("statistics",
250 S_IFREG | S_IRUGO | S_IWUSR,
251 irq_ptr->debugfs_dev, irq_ptr,
252 &debugfs_perf_fops);
253 if (IS_ERR(irq_ptr->debugfs_perf))
254 irq_ptr->debugfs_perf = NULL;
255
159 for_each_input_queue(irq_ptr, q, i) 256 for_each_input_queue(irq_ptr, q, i)
160 setup_debugfs_entry(q, cdev); 257 setup_debugfs_entry(q, cdev);
161 for_each_output_queue(irq_ptr, q, i) 258 for_each_output_queue(irq_ptr, q, i)
@@ -171,6 +268,7 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
171 debugfs_remove(q->debugfs_q); 268 debugfs_remove(q->debugfs_q);
172 for_each_output_queue(irq_ptr, q, i) 269 for_each_output_queue(irq_ptr, q, i)
173 debugfs_remove(q->debugfs_q); 270 debugfs_remove(q->debugfs_q);
271 debugfs_remove(irq_ptr->debugfs_perf);
174 debugfs_remove(irq_ptr->debugfs_dev); 272 debugfs_remove(irq_ptr->debugfs_dev);
175} 273}
176 274
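In the qdio_debug.c hunks above, writing "0" to the new statistics file clears and disables the per-device counters, writing "1" enables them, and qperf_show() prints one line per qperf_names[] entry by walking the perf_stat struct as an array of unsigned int. That only works while the struct holds nothing but unsigned ints in the same order as the name table, which is why the two are kept in lockstep. Assuming debugfs is mounted at /sys/kernel/debug, the file would typically sit in the qdio directory for the device and be driven with echo 1 / echo 0 and read back with cat (the exact path is not part of this diff). A small user-space model of the struct-as-array walk:

#include <stdio.h>

struct qdio_dev_perf_stat {	/* truncated copy for illustration: all unsigned int */
	unsigned int adapter_int;
	unsigned int qdio_int;
	unsigned int pci_request_int;
};

static const char *names[] = {
	"Assumed adapter interrupts",
	"QDIO interrupts",
	"Requested PCIs",
};

int main(void)
{
	struct qdio_dev_perf_stat st = { 3, 42, 7 };
	unsigned int *stat = (unsigned int *)&st;	/* walk the struct as an array */
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%26s:\t%u\n", names[i], stat[i]);
	return 0;
}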
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index b2275c5000e7..62b654af9237 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -22,7 +22,6 @@
22#include "device.h" 22#include "device.h"
23#include "qdio.h" 23#include "qdio.h"
24#include "qdio_debug.h" 24#include "qdio_debug.h"
25#include "qdio_perf.h"
26 25
27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ 26MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
28 "Jan Glauber <jang@linux.vnet.ibm.com>"); 27 "Jan Glauber <jang@linux.vnet.ibm.com>");
@@ -126,7 +125,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
126 int rc; 125 int rc;
127 126
128 BUG_ON(!q->irq_ptr->sch_token); 127 BUG_ON(!q->irq_ptr->sch_token);
129 qdio_perf_stat_inc(&perf_stats.debug_eqbs_all); 128 qperf_inc(q, eqbs);
130 129
131 if (!q->is_input_q) 130 if (!q->is_input_q)
132 nr += q->irq_ptr->nr_input_qs; 131 nr += q->irq_ptr->nr_input_qs;
@@ -139,7 +138,7 @@ again:
139 * buffers later. 138 * buffers later.
140 */ 139 */
141 if ((ccq == 96) && (count != tmp_count)) { 140 if ((ccq == 96) && (count != tmp_count)) {
142 qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete); 141 qperf_inc(q, eqbs_partial);
143 return (count - tmp_count); 142 return (count - tmp_count);
144 } 143 }
145 144
@@ -182,7 +181,7 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
182 return 0; 181 return 0;
183 182
184 BUG_ON(!q->irq_ptr->sch_token); 183 BUG_ON(!q->irq_ptr->sch_token);
185 qdio_perf_stat_inc(&perf_stats.debug_sqbs_all); 184 qperf_inc(q, sqbs);
186 185
187 if (!q->is_input_q) 186 if (!q->is_input_q)
188 nr += q->irq_ptr->nr_input_qs; 187 nr += q->irq_ptr->nr_input_qs;
@@ -191,7 +190,7 @@ again:
191 rc = qdio_check_ccq(q, ccq); 190 rc = qdio_check_ccq(q, ccq);
192 if (rc == 1) { 191 if (rc == 1) {
193 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); 192 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
194 qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete); 193 qperf_inc(q, sqbs_partial);
195 goto again; 194 goto again;
196 } 195 }
197 if (rc < 0) { 196 if (rc < 0) {
@@ -285,7 +284,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
285 return 0; 284 return 0;
286 285
287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 286 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
288 qdio_perf_stat_inc(&perf_stats.siga_sync); 287 qperf_inc(q, siga_sync);
289 288
290 cc = do_siga_sync(q->irq_ptr->schid, output, input); 289 cc = do_siga_sync(q->irq_ptr->schid, output, input);
291 if (cc) 290 if (cc)
@@ -350,7 +349,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
350 int cc; 349 int cc;
351 350
352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 351 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
353 qdio_perf_stat_inc(&perf_stats.siga_in); 352 qperf_inc(q, siga_read);
354 353
355 cc = do_siga_input(q->irq_ptr->schid, q->mask); 354 cc = do_siga_input(q->irq_ptr->schid, q->mask);
356 if (cc) 355 if (cc)
@@ -382,7 +381,7 @@ static inline void qdio_stop_polling(struct qdio_q *q)
382 return; 381 return;
383 382
384 q->u.in.polling = 0; 383 q->u.in.polling = 0;
385 qdio_perf_stat_inc(&perf_stats.debug_stop_polling); 384 qperf_inc(q, stop_polling);
386 385
387 /* show the card that we are not polling anymore */ 386 /* show the card that we are not polling anymore */
388 if (is_qebsm(q)) { 387 if (is_qebsm(q)) {
@@ -400,7 +399,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
400 /* special handling for no target buffer empty */ 399 /* special handling for no target buffer empty */
401 if ((!q->is_input_q && 400 if ((!q->is_input_q &&
402 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 401 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
403 qdio_perf_stat_inc(&perf_stats.outbound_target_full); 402 qperf_inc(q, target_full);
404 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 403 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
405 q->first_to_check); 404 q->first_to_check);
406 return; 405 return;
@@ -487,7 +486,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
487 inbound_primed(q, count); 486 inbound_primed(q, count);
488 q->first_to_check = add_buf(q->first_to_check, count); 487 q->first_to_check = add_buf(q->first_to_check, count);
489 if (atomic_sub(count, &q->nr_buf_used) == 0) 488 if (atomic_sub(count, &q->nr_buf_used) == 0)
490 qdio_perf_stat_inc(&perf_stats.inbound_queue_full); 489 qperf_inc(q, inbound_queue_full);
491 break; 490 break;
492 case SLSB_P_INPUT_ERROR: 491 case SLSB_P_INPUT_ERROR:
493 announce_buffer_error(q, count); 492 announce_buffer_error(q, count);
@@ -532,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
532 qdio_siga_sync_q(q); 531 qdio_siga_sync_q(q);
533 get_buf_state(q, q->first_to_check, &state, 0); 532 get_buf_state(q, q->first_to_check, &state, 0);
534 533
535 if (state == SLSB_P_INPUT_PRIMED) 534 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
536 /* more work coming */ 535 /* more work coming */
537 return 0; 536 return 0;
538 537
@@ -567,9 +566,10 @@ static void qdio_kick_handler(struct qdio_q *q)
567 count = sub_buf(end, start); 566 count = sub_buf(end, start);
568 567
569 if (q->is_input_q) { 568 if (q->is_input_q) {
570 qdio_perf_stat_inc(&perf_stats.inbound_handler); 569 qperf_inc(q, inbound_handler);
571 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); 570 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
572 } else 571 } else
572 qperf_inc(q, outbound_handler);
573 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", 573 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
574 start, count); 574 start, count);
575 575
@@ -583,24 +583,28 @@ static void qdio_kick_handler(struct qdio_q *q)
583 583
584static void __qdio_inbound_processing(struct qdio_q *q) 584static void __qdio_inbound_processing(struct qdio_q *q)
585{ 585{
586 qdio_perf_stat_inc(&perf_stats.tasklet_inbound); 586 qperf_inc(q, tasklet_inbound);
587again: 587again:
588 if (!qdio_inbound_q_moved(q)) 588 if (!qdio_inbound_q_moved(q))
589 return; 589 return;
590 590
591 qdio_kick_handler(q); 591 qdio_kick_handler(q);
592 592
593 if (!qdio_inbound_q_done(q)) 593 if (!qdio_inbound_q_done(q)) {
594 /* means poll time is not yet over */ 594 /* means poll time is not yet over */
595 qperf_inc(q, tasklet_inbound_resched);
595 goto again; 596 goto again;
597 }
596 598
597 qdio_stop_polling(q); 599 qdio_stop_polling(q);
598 /* 600 /*
599 * We need to check again to not lose initiative after 601 * We need to check again to not lose initiative after
600 * resetting the ACK state. 602 * resetting the ACK state.
601 */ 603 */
602 if (!qdio_inbound_q_done(q)) 604 if (!qdio_inbound_q_done(q)) {
605 qperf_inc(q, tasklet_inbound_resched2);
603 goto again; 606 goto again;
607 }
604} 608}
605 609
606void qdio_inbound_processing(unsigned long data) 610void qdio_inbound_processing(unsigned long data)
@@ -688,7 +692,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
688 return 0; 692 return 0;
689 693
690 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 694 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
691 qdio_perf_stat_inc(&perf_stats.siga_out); 695 qperf_inc(q, siga_write);
692 696
693 cc = qdio_siga_output(q, &busy_bit); 697 cc = qdio_siga_output(q, &busy_bit);
694 switch (cc) { 698 switch (cc) {
@@ -711,7 +715,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
711 715
712static void __qdio_outbound_processing(struct qdio_q *q) 716static void __qdio_outbound_processing(struct qdio_q *q)
713{ 717{
714 qdio_perf_stat_inc(&perf_stats.tasklet_outbound); 718 qperf_inc(q, tasklet_outbound);
715 BUG_ON(atomic_read(&q->nr_buf_used) < 0); 719 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
716 720
717 if (qdio_outbound_q_moved(q)) 721 if (qdio_outbound_q_moved(q))
@@ -739,12 +743,9 @@ static void __qdio_outbound_processing(struct qdio_q *q)
739 */ 743 */
740 if (qdio_outbound_q_done(q)) 744 if (qdio_outbound_q_done(q))
741 del_timer(&q->u.out.timer); 745 del_timer(&q->u.out.timer);
742 else { 746 else
743 if (!timer_pending(&q->u.out.timer)) { 747 if (!timer_pending(&q->u.out.timer))
744 mod_timer(&q->u.out.timer, jiffies + 10 * HZ); 748 mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
745 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
746 }
747 }
748 return; 749 return;
749 750
750sched: 751sched:
@@ -784,7 +785,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
784 785
785static void __tiqdio_inbound_processing(struct qdio_q *q) 786static void __tiqdio_inbound_processing(struct qdio_q *q)
786{ 787{
787 qdio_perf_stat_inc(&perf_stats.thinint_inbound); 788 qperf_inc(q, tasklet_inbound);
788 qdio_sync_after_thinint(q); 789 qdio_sync_after_thinint(q);
789 790
790 /* 791 /*
@@ -799,7 +800,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
799 qdio_kick_handler(q); 800 qdio_kick_handler(q);
800 801
801 if (!qdio_inbound_q_done(q)) { 802 if (!qdio_inbound_q_done(q)) {
802 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); 803 qperf_inc(q, tasklet_inbound_resched);
803 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { 804 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
804 tasklet_schedule(&q->tasklet); 805 tasklet_schedule(&q->tasklet);
805 return; 806 return;
@@ -812,7 +813,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
812 * resetting the ACK state. 813 * resetting the ACK state.
813 */ 814 */
814 if (!qdio_inbound_q_done(q)) { 815 if (!qdio_inbound_q_done(q)) {
815 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); 816 qperf_inc(q, tasklet_inbound_resched2);
816 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 817 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
817 tasklet_schedule(&q->tasklet); 818 tasklet_schedule(&q->tasklet);
818 } 819 }
@@ -851,8 +852,6 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
851 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 852 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
852 return; 853 return;
853 854
854 qdio_perf_stat_inc(&perf_stats.pci_int);
855
856 for_each_input_queue(irq_ptr, q, i) 855 for_each_input_queue(irq_ptr, q, i)
857 tasklet_schedule(&q->tasklet); 856 tasklet_schedule(&q->tasklet);
858 857
@@ -923,8 +922,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
923 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 922 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
924 int cstat, dstat; 923 int cstat, dstat;
925 924
926 qdio_perf_stat_inc(&perf_stats.qdio_int);
927
928 if (!intparm || !irq_ptr) { 925 if (!intparm || !irq_ptr) {
929 DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); 926 DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
930 return; 927 return;
@@ -963,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
963 qdio_handle_activate_check(cdev, intparm, cstat, 960 qdio_handle_activate_check(cdev, intparm, cstat,
964 dstat); 961 dstat);
965 break; 962 break;
963 case QDIO_IRQ_STATE_STOPPED:
964 break;
966 default: 965 default:
967 WARN_ON(1); 966 WARN_ON(1);
968 } 967 }
@@ -1383,6 +1382,8 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1383{ 1382{
1384 int used, diff; 1383 int used, diff;
1385 1384
1385 qperf_inc(q, inbound_call);
1386
1386 if (!q->u.in.polling) 1387 if (!q->u.in.polling)
1387 goto set; 1388 goto set;
1388 1389
@@ -1438,14 +1439,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1438 unsigned char state; 1439 unsigned char state;
1439 int used, rc = 0; 1440 int used, rc = 0;
1440 1441
1441 qdio_perf_stat_inc(&perf_stats.outbound_handler); 1442 qperf_inc(q, outbound_call);
1442 1443
1443 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1444 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1444 used = atomic_add_return(count, &q->nr_buf_used); 1445 used = atomic_add_return(count, &q->nr_buf_used);
1445 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1446 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1446 1447
1447 if (callflags & QDIO_FLAG_PCI_OUT) 1448 if (callflags & QDIO_FLAG_PCI_OUT) {
1448 q->u.out.pci_out_enabled = 1; 1449 q->u.out.pci_out_enabled = 1;
1450 qperf_inc(q, pci_request_int);
1451 }
1449 else 1452 else
1450 q->u.out.pci_out_enabled = 0; 1453 q->u.out.pci_out_enabled = 0;
1451 1454
@@ -1484,7 +1487,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1484 if (state != SLSB_CU_OUTPUT_PRIMED) 1487 if (state != SLSB_CU_OUTPUT_PRIMED)
1485 rc = qdio_kick_outbound_q(q); 1488 rc = qdio_kick_outbound_q(q);
1486 else 1489 else
1487 qdio_perf_stat_inc(&perf_stats.fast_requeue); 1490 qperf_inc(q, fast_requeue);
1488 1491
1489out: 1492out:
1490 tasklet_schedule(&q->tasklet); 1493 tasklet_schedule(&q->tasklet);
@@ -1540,16 +1543,11 @@ static int __init init_QDIO(void)
1540 rc = qdio_debug_init(); 1543 rc = qdio_debug_init();
1541 if (rc) 1544 if (rc)
1542 goto out_ti; 1545 goto out_ti;
1543 rc = qdio_setup_perf_stats();
1544 if (rc)
1545 goto out_debug;
1546 rc = tiqdio_register_thinints(); 1546 rc = tiqdio_register_thinints();
1547 if (rc) 1547 if (rc)
1548 goto out_perf; 1548 goto out_debug;
1549 return 0; 1549 return 0;
1550 1550
1551out_perf:
1552 qdio_remove_perf_stats();
1553out_debug: 1551out_debug:
1554 qdio_debug_exit(); 1552 qdio_debug_exit();
1555out_ti: 1553out_ti:
@@ -1563,7 +1561,6 @@ static void __exit exit_QDIO(void)
1563{ 1561{
1564 tiqdio_unregister_thinints(); 1562 tiqdio_unregister_thinints();
1565 tiqdio_free_memory(); 1563 tiqdio_free_memory();
1566 qdio_remove_perf_stats();
1567 qdio_debug_exit(); 1564 qdio_debug_exit();
1568 qdio_setup_exit(); 1565 qdio_setup_exit();
1569} 1566}
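
The qdio hunks above replace the driver-wide qdio_perf_stat_inc()/perf_stats counters with per-queue qperf_inc() calls, and the two deleted files below remove the old global /proc and bus-attribute plumbing. qperf_inc() itself is defined elsewhere in the driver and is not shown in this diff; what follows is only a minimal userspace sketch of the per-object counter pattern, with illustrative demo_* names that are not the driver's own definitions.

/*
 * Minimal sketch of the counter move shown above: instead of one global
 * stats struct shared by every queue, each queue carries its own counters.
 * All names here (struct demo_queue, demo_qperf_inc, ...) are hypothetical.
 */
#include <stdio.h>

struct demo_queue_stats {
	unsigned long tasklet_inbound;
	unsigned long siga_write;
	unsigned long fast_requeue;
};

struct demo_queue {
	int nr;
	struct demo_queue_stats stats;	/* per-queue, not global */
};

/* analogous in spirit to qperf_inc(q, attr): bump a counter on one queue */
#define demo_qperf_inc(q, attr)	((q)->stats.attr++)

int main(void)
{
	struct demo_queue q = { .nr = 0 };

	demo_qperf_inc(&q, siga_write);
	demo_qperf_inc(&q, fast_requeue);
	printf("queue %d: siga_write=%lu fast_requeue=%lu\n",
	       q.nr, q.stats.siga_write, q.stats.fast_requeue);
	return 0;
}
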
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
deleted file mode 100644
index 54f7c325a3e6..000000000000
--- a/drivers/s390/cio/qdio_perf.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * drivers/s390/cio/qdio_perf.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/kernel.h>
9#include <linux/proc_fs.h>
10#include <linux/seq_file.h>
11#include <asm/ccwdev.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio_debug.h"
19#include "qdio_perf.h"
20
21int qdio_performance_stats;
22struct qdio_perf_stats perf_stats;
23
24#ifdef CONFIG_PROC_FS
25static struct proc_dir_entry *qdio_perf_pde;
26#endif
27
28/*
29 * procfs functions
30 */
31static int qdio_perf_proc_show(struct seq_file *m, void *v)
32{
33 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
34 (long)atomic_long_read(&perf_stats.qdio_int));
35 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36 (long)atomic_long_read(&perf_stats.pci_int));
37 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
38 (long)atomic_long_read(&perf_stats.thin_int));
39 seq_printf(m, "\n");
40 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
41 (long)atomic_long_read(&perf_stats.tasklet_inbound));
42 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
43 (long)atomic_long_read(&perf_stats.tasklet_outbound));
44 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
45 (long)atomic_long_read(&perf_stats.tasklet_thinint),
46 (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
47 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
48 (long)atomic_long_read(&perf_stats.thinint_inbound),
49 (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
50 seq_printf(m, "\n");
51 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
52 (long)atomic_long_read(&perf_stats.siga_in));
53 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
54 (long)atomic_long_read(&perf_stats.siga_out));
55 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
56 (long)atomic_long_read(&perf_stats.siga_sync));
57 seq_printf(m, "\n");
58 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
59 (long)atomic_long_read(&perf_stats.inbound_handler));
60 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
61 (long)atomic_long_read(&perf_stats.outbound_handler));
62 seq_printf(m, "\n");
63 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
64 (long)atomic_long_read(&perf_stats.fast_requeue));
65 seq_printf(m, "Number of outbound target full condition\t: %li\n",
66 (long)atomic_long_read(&perf_stats.outbound_target_full));
67 seq_printf(m, "Number of inbound queue full condition\t\t: %li\n",
68 (long)atomic_long_read(&perf_stats.inbound_queue_full));
69 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
70 (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
71 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
72 (long)atomic_long_read(&perf_stats.debug_stop_polling));
73 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
74 (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
75 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
76 (long)atomic_long_read(&perf_stats.debug_eqbs_all),
77 (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
78 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
79 (long)atomic_long_read(&perf_stats.debug_sqbs_all),
80 (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
81 seq_printf(m, "\n");
82 return 0;
83}
84static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
85{
86 return single_open(filp, qdio_perf_proc_show, NULL);
87}
88
89static const struct file_operations qdio_perf_proc_fops = {
90 .owner = THIS_MODULE,
91 .open = qdio_perf_seq_open,
92 .read = seq_read,
93 .llseek = seq_lseek,
94 .release = single_release,
95};
96
97/*
98 * sysfs functions
99 */
100static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
101{
102 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
103}
104
105static ssize_t qdio_perf_stats_store(struct bus_type *bus,
106 const char *buf, size_t count)
107{
108 unsigned long i;
109
110 if (strict_strtoul(buf, 16, &i) != 0)
111 return -EINVAL;
112 if ((i != 0) && (i != 1))
113 return -EINVAL;
114 if (i == qdio_performance_stats)
115 return count;
116
117 qdio_performance_stats = i;
118 /* reset performance statistics */
119 if (i == 0)
120 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
121 return count;
122}
123
124static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
125 qdio_perf_stats_store);
126
127int __init qdio_setup_perf_stats(void)
128{
129 int rc;
130
131 rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
132 if (rc)
133 return rc;
134
135#ifdef CONFIG_PROC_FS
136 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
137 qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
138 NULL, &qdio_perf_proc_fops);
139#endif
140 return 0;
141}
142
143void qdio_remove_perf_stats(void)
144{
145#ifdef CONFIG_PROC_FS
146 remove_proc_entry("qdio_perf", NULL);
147#endif
148 bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
149}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
deleted file mode 100644
index 12454231dc8b..000000000000
--- a/drivers/s390/cio/qdio_perf.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * drivers/s390/cio/qdio_perf.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_PERF_H
9#define QDIO_PERF_H
10
11#include <linux/types.h>
12#include <asm/atomic.h>
13
14struct qdio_perf_stats {
15 /* interrupt handler calls */
16 atomic_long_t qdio_int;
17 atomic_long_t pci_int;
18 atomic_long_t thin_int;
19
20 /* tasklet runs */
21 atomic_long_t tasklet_inbound;
22 atomic_long_t tasklet_outbound;
23 atomic_long_t tasklet_thinint;
24 atomic_long_t tasklet_thinint_loop;
25 atomic_long_t thinint_inbound;
26 atomic_long_t thinint_inbound_loop;
27 atomic_long_t thinint_inbound_loop2;
28
29 /* signal adapter calls */
30 atomic_long_t siga_out;
31 atomic_long_t siga_in;
32 atomic_long_t siga_sync;
33
34 /* misc */
35 atomic_long_t inbound_handler;
36 atomic_long_t outbound_handler;
37 atomic_long_t fast_requeue;
38 atomic_long_t outbound_target_full;
39 atomic_long_t inbound_queue_full;
40
41 /* for debugging */
42 atomic_long_t debug_tl_out_timer;
43 atomic_long_t debug_stop_polling;
44 atomic_long_t debug_eqbs_all;
45 atomic_long_t debug_eqbs_incomplete;
46 atomic_long_t debug_sqbs_all;
47 atomic_long_t debug_sqbs_incomplete;
48};
49
50extern struct qdio_perf_stats perf_stats;
51extern int qdio_performance_stats;
52
53static inline void qdio_perf_stat_inc(atomic_long_t *count)
54{
55 if (qdio_performance_stats)
56 atomic_long_inc(count);
57}
58
59int qdio_setup_perf_stats(void);
60void qdio_remove_perf_stats(void);
61
62#endif
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 981a77ea7ee2..091d904d3182 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,9 +1,7 @@
1/* 1/*
2 * linux/drivers/s390/cio/thinint_qdio.c 2 * linux/drivers/s390/cio/thinint_qdio.c
3 * 3 *
4 * thin interrupt support for qdio 4 * Copyright 2000,2009 IBM Corp.
5 *
6 * Copyright 2000-2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Cornelia Huck <cornelia.huck@de.ibm.com> 6 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 * Jan Glauber <jang@linux.vnet.ibm.com> 7 * Jan Glauber <jang@linux.vnet.ibm.com>
@@ -19,7 +17,6 @@
19#include "ioasm.h" 17#include "ioasm.h"
20#include "qdio.h" 18#include "qdio.h"
21#include "qdio_debug.h" 19#include "qdio_debug.h"
22#include "qdio_perf.h"
23 20
24/* 21/*
25 * Restriction: only 63 iqdio subchannels would have its own indicator, 22 * Restriction: only 63 iqdio subchannels would have its own indicator,
@@ -132,8 +129,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
132{ 129{
133 struct qdio_q *q; 130 struct qdio_q *q;
134 131
135 qdio_perf_stat_inc(&perf_stats.thin_int);
136
137 /* 132 /*
138 * SVS only when needed: issue SVS to benefit from iqdio interrupt 133 * SVS only when needed: issue SVS to benefit from iqdio interrupt
139 * avoidance (SVS clears adapter interrupt suppression overwrite) 134 * avoidance (SVS clears adapter interrupt suppression overwrite)
@@ -154,6 +149,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
154 list_for_each_entry_rcu(q, &tiq_list, entry) 149 list_for_each_entry_rcu(q, &tiq_list, entry)
155 /* only process queues from changed sets */ 150 /* only process queues from changed sets */
156 if (*q->irq_ptr->dsci) { 151 if (*q->irq_ptr->dsci) {
152 qperf_inc(q, adapter_int);
157 153
158 /* only clear it if the indicator is non-shared */ 154 /* only clear it if the indicator is non-shared */
159 if (!shared_ind(q->irq_ptr)) 155 if (!shared_ind(q->irq_ptr))
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 0d4d18bdd45c..c68be24e27d9 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -393,10 +393,12 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
393 * u_mult_inv > 128 bytes. 393 * u_mult_inv > 128 bytes.
394 */ 394 */
395 if (copied == 0) { 395 if (copied == 0) {
396 int len; 396 unsigned int len;
397 spin_unlock_bh(&zcrypt_device_lock); 397 spin_unlock_bh(&zcrypt_device_lock);
398 /* len is max 256 / 2 - 120 = 8 */ 398 /* len is max 256 / 2 - 120 = 8 */
399 len = crt->inputdatalength / 2 - 120; 399 len = crt->inputdatalength / 2 - 120;
400 if (len > sizeof(z1))
401 return -EFAULT;
400 z1 = z2 = z3 = 0; 402 z1 = z2 = z3 = 0;
401 if (copy_from_user(&z1, crt->np_prime, len) || 403 if (copy_from_user(&z1, crt->np_prime, len) ||
402 copy_from_user(&z2, crt->bp_key, len) || 404 copy_from_user(&z2, crt->bp_key, len) ||
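
The zcrypt_api hunk above widens len to unsigned and rejects any value larger than the destination buffer before the copy_from_user() calls; presumably the switch to unsigned also means a too-small inputdatalength wraps to a very large value and is caught by the same check. A minimal userspace sketch of that validate-before-copy pattern, with illustrative demo_* names:

/*
 * Sketch of the bounds check added above: a length derived from a
 * caller-supplied field is validated against the destination buffer
 * before any copy. demo_crt and demo_copy_key are illustrative only.
 */
#include <string.h>
#include <errno.h>

struct demo_crt {
	unsigned int inputdatalength;		/* caller controlled */
	const unsigned char *np_prime;
};

static int demo_copy_key(const struct demo_crt *crt)
{
	unsigned char z1[16] = { 0 };
	unsigned int len;

	/* the driver comment says len is at most 256 / 2 - 120 = 8,
	 * but the value is still checked against the buffer size */
	len = crt->inputdatalength / 2 - 120;
	if (len > sizeof(z1))
		return -EFAULT;

	memcpy(z1, crt->np_prime, len);
	return 0;
}

int main(void)
{
	unsigned char key[8] = { 0 };
	struct demo_crt crt = { .inputdatalength = 256, .np_prime = key };

	return demo_copy_key(&crt);	/* len = 8, within bounds */
}
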
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index a23726a0735c..142f72a2ca5a 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -373,6 +373,8 @@ static int convert_type86(struct zcrypt_device *zdev,
373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; 373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
374 return -EAGAIN; 374 return -EAGAIN;
375 } 375 }
376 if (service_rc == 8 && service_rs == 72)
377 return -EINVAL;
376 zdev->online = 0; 378 zdev->online = 0;
377 return -EAGAIN; /* repeat the request on a different device. */ 379 return -EAGAIN; /* repeat the request on a different device. */
378 } 380 }
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 79c120578e61..68f3e6204db8 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -470,6 +470,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
470 } 470 }
471 if (service_rc == 12 && service_rs == 769) 471 if (service_rc == 12 && service_rs == 769)
472 return -EINVAL; 472 return -EINVAL;
473 if (service_rc == 8 && service_rs == 72)
474 return -EINVAL;
473 zdev->online = 0; 475 zdev->online = 0;
474 return -EAGAIN; /* repeat the request on a different device. */ 476 return -EAGAIN; /* repeat the request on a different device. */
475 } 477 }
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 3c77bfe0764c..147bb1a69aba 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3398,7 +3398,7 @@ claw_init(void)
3398 goto out_err; 3398 goto out_err;
3399 } 3399 }
3400 CLAW_DBF_TEXT(2, setup, "init_mod"); 3400 CLAW_DBF_TEXT(2, setup, "init_mod");
3401 claw_root_dev = root_device_register("qeth"); 3401 claw_root_dev = root_device_register("claw");
3402 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0; 3402 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3403 if (ret) 3403 if (ret)
3404 goto register_err; 3404 goto register_err;
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index f932400e980a..0eb6eefd2c1a 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/miscdevice.h> 14#include <linux/miscdevice.h>
15#include <asm/compat.h>
15#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
16#include "zfcp_def.h" 17#include "zfcp_def.h"
17#include "zfcp_ext.h" 18#include "zfcp_ext.h"
@@ -163,7 +164,7 @@ static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
163} 164}
164 165
165static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, 166static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
166 unsigned long buffer) 167 unsigned long arg)
167{ 168{
168 struct zfcp_cfdc_data *data; 169 struct zfcp_cfdc_data *data;
169 struct zfcp_cfdc_data __user *data_user; 170 struct zfcp_cfdc_data __user *data_user;
@@ -175,7 +176,11 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
175 if (command != ZFCP_CFDC_IOC) 176 if (command != ZFCP_CFDC_IOC)
176 return -ENOTTY; 177 return -ENOTTY;
177 178
178 data_user = (void __user *) buffer; 179 if (is_compat_task())
180 data_user = compat_ptr(arg);
181 else
182 data_user = (void __user *)arg;
183
179 if (!data_user) 184 if (!data_user)
180 return -EINVAL; 185 return -EINVAL;
181 186
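
The zfcp_cfdc hunk above distinguishes 32-bit (compat) callers and converts the ioctl argument with compat_ptr() instead of a plain cast, so the user pointer is widened correctly on a 64-bit kernel. A simplified userspace sketch of that idea; demo_user_ptr() is a hypothetical stand-in, not the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the is_compat_task()/compat_ptr() pair: when
 * the caller is 32-bit, only the low 32 bits of the argument carry the
 * pointer, so the value must be widened from those bits alone. */
static void *demo_user_ptr(unsigned long arg, int caller_is_32bit)
{
	if (caller_is_32bit)
		return (void *)(uintptr_t)(uint32_t)arg;	/* widen low 32 bits */
	return (void *)arg;
}

int main(void)
{
	unsigned long arg = 0xdeadbeefUL;

	printf("compat: %p  native: %p\n",
	       demo_user_ptr(arg, 1), demo_user_ptr(arg, 0));
	return 0;
}
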
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 84450955ae11..7369c8911bcf 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -327,7 +327,7 @@ static void zfcp_dbf_hba_view_response(char **p,
327 break; 327 break;
328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
330 p += sprintf(*p, "\n"); 330 *p += sprintf(*p, "\n");
331 break; 331 break;
332 332
333 case FSF_QTCB_OPEN_PORT_WITH_DID: 333 case FSF_QTCB_OPEN_PORT_WITH_DID:
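
The zfcp_dbf hunk above changes p += sprintf(...) to *p += sprintf(*p, ...): the helper receives a char ** and must advance the caller's output cursor, not its own local copy of the pointer. A minimal sketch of that cursor pattern; demo_out() is illustrative, not the driver function:

#include <stdio.h>

/* The helper takes char **p and advances *p (the shared output cursor),
 * so successive calls append rather than overwrite. */
static void demo_out(char **p, const char *s)
{
	*p += sprintf(*p, "%s\n", s);	/* advance the caller's cursor */
}

int main(void)
{
	char buf[64];
	char *cur = buf;

	demo_out(&cur, "first");
	demo_out(&cur, "second");
	printf("%s", buf);		/* both lines, appended in order */
	return 0;
}
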
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 03dec832b465..66bdb34143cb 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -108,6 +108,7 @@ extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
108extern int zfcp_fc_gs_setup(struct zfcp_adapter *); 108extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
109extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 109extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
110extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 110extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
111extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
111 112
112/* zfcp_fsf.c */ 113/* zfcp_fsf.c */
113extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 114extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
@@ -129,9 +130,9 @@ extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
129extern int zfcp_fsf_status_read(struct zfcp_qdio *); 130extern int zfcp_fsf_status_read(struct zfcp_qdio *);
130extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 131extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
131extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, 132extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
132 mempool_t *); 133 mempool_t *, unsigned int);
133extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, 134extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
134 struct zfcp_fsf_ct_els *); 135 struct zfcp_fsf_ct_els *, unsigned int);
135extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 136extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
136 struct scsi_cmnd *); 137 struct scsi_cmnd *);
137extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 138extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index ac5e3b7a3576..271399f62f1b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -258,7 +258,8 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
258 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; 258 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
259 259
260 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, 260 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
261 adapter->pool.gid_pn_req); 261 adapter->pool.gid_pn_req,
262 ZFCP_FC_CTELS_TMO);
262 if (!ret) { 263 if (!ret) {
263 wait_for_completion(&completion); 264 wait_for_completion(&completion);
264 zfcp_fc_ns_gid_pn_eval(gid_pn); 265 zfcp_fc_ns_gid_pn_eval(gid_pn);
@@ -421,7 +422,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
421 hton24(adisc->adisc_req.adisc_port_id, 422 hton24(adisc->adisc_req.adisc_port_id,
422 fc_host_port_id(adapter->scsi_host)); 423 fc_host_port_id(adapter->scsi_host));
423 424
424 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); 425 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
426 ZFCP_FC_CTELS_TMO);
425 if (ret) 427 if (ret)
426 kmem_cache_free(zfcp_data.adisc_cache, adisc); 428 kmem_cache_free(zfcp_data.adisc_cache, adisc);
427 429
@@ -532,7 +534,8 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
532 ct->req = &gpn_ft->sg_req; 534 ct->req = &gpn_ft->sg_req;
533 ct->resp = gpn_ft->sg_resp; 535 ct->resp = gpn_ft->sg_resp;
534 536
535 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL); 537 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
538 ZFCP_FC_CTELS_TMO);
536 if (!ret) 539 if (!ret)
537 wait_for_completion(&completion); 540 wait_for_completion(&completion);
538 return ret; 541 return ret;
@@ -668,15 +671,52 @@ static void zfcp_fc_ct_els_job_handler(void *data)
668{ 671{
669 struct fc_bsg_job *job = data; 672 struct fc_bsg_job *job = data;
670 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; 673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
671 int status = zfcp_ct_els->status; 674 struct fc_bsg_reply *jr = job->reply;
672 int reply_status;
673 675
674 reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; 676 jr->reply_payload_rcv_len = job->reply_payload.payload_len;
675 job->reply->reply_data.ctels_reply.status = reply_status; 677 jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
676 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 678 jr->result = zfcp_ct_els->status ? -EIO : 0;
677 job->job_done(job); 679 job->job_done(job);
678} 680}
679 681
682static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
683{
684 u32 preamble_word1;
685 u8 gs_type;
686 struct zfcp_adapter *adapter;
687
688 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
689 gs_type = (preamble_word1 & 0xff000000) >> 24;
690
691 adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
692
693 switch (gs_type) {
694 case FC_FST_ALIAS:
695 return &adapter->gs->as;
696 case FC_FST_MGMT:
697 return &adapter->gs->ms;
698 case FC_FST_TIME:
699 return &adapter->gs->ts;
700 break;
701 case FC_FST_DIR:
702 return &adapter->gs->ds;
703 break;
704 default:
705 return NULL;
706 }
707}
708
709static void zfcp_fc_ct_job_handler(void *data)
710{
711 struct fc_bsg_job *job = data;
712 struct zfcp_fc_wka_port *wka_port;
713
714 wka_port = zfcp_fc_job_wka_port(job);
715 zfcp_fc_wka_port_put(wka_port);
716
717 zfcp_fc_ct_els_job_handler(data);
718}
719
680static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, 720static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
681 struct zfcp_adapter *adapter) 721 struct zfcp_adapter *adapter)
682{ 722{
@@ -695,43 +735,27 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
695 } else 735 } else
696 d_id = ntoh24(job->request->rqst_data.h_els.port_id); 736 d_id = ntoh24(job->request->rqst_data.h_els.port_id);
697 737
698 return zfcp_fsf_send_els(adapter, d_id, els); 738 els->handler = zfcp_fc_ct_els_job_handler;
739 return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
699} 740}
700 741
701static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, 742static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
702 struct zfcp_adapter *adapter) 743 struct zfcp_adapter *adapter)
703{ 744{
704 int ret; 745 int ret;
705 u8 gs_type;
706 struct zfcp_fsf_ct_els *ct = job->dd_data; 746 struct zfcp_fsf_ct_els *ct = job->dd_data;
707 struct zfcp_fc_wka_port *wka_port; 747 struct zfcp_fc_wka_port *wka_port;
708 u32 preamble_word1;
709 748
710 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; 749 wka_port = zfcp_fc_job_wka_port(job);
711 gs_type = (preamble_word1 & 0xff000000) >> 24; 750 if (!wka_port)
712 751 return -EINVAL;
713 switch (gs_type) {
714 case FC_FST_ALIAS:
715 wka_port = &adapter->gs->as;
716 break;
717 case FC_FST_MGMT:
718 wka_port = &adapter->gs->ms;
719 break;
720 case FC_FST_TIME:
721 wka_port = &adapter->gs->ts;
722 break;
723 case FC_FST_DIR:
724 wka_port = &adapter->gs->ds;
725 break;
726 default:
727 return -EINVAL; /* no such service */
728 }
729 752
730 ret = zfcp_fc_wka_port_get(wka_port); 753 ret = zfcp_fc_wka_port_get(wka_port);
731 if (ret) 754 if (ret)
732 return ret; 755 return ret;
733 756
734 ret = zfcp_fsf_send_ct(wka_port, ct, NULL); 757 ct->handler = zfcp_fc_ct_job_handler;
758 ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
735 if (ret) 759 if (ret)
736 zfcp_fc_wka_port_put(wka_port); 760 zfcp_fc_wka_port_put(wka_port);
737 761
@@ -752,7 +776,6 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
752 776
753 ct_els->req = job->request_payload.sg_list; 777 ct_els->req = job->request_payload.sg_list;
754 ct_els->resp = job->reply_payload.sg_list; 778 ct_els->resp = job->reply_payload.sg_list;
755 ct_els->handler = zfcp_fc_ct_els_job_handler;
756 ct_els->handler_data = job; 779 ct_els->handler_data = job;
757 780
758 switch (job->request->msgcode) { 781 switch (job->request->msgcode) {
@@ -767,6 +790,12 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
767 } 790 }
768} 791}
769 792
793int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
794{
795 /* hardware tracks timeout, reset bsg timeout to not interfere */
796 return -EAGAIN;
797}
798
770int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) 799int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
771{ 800{
772 struct zfcp_fc_wka_ports *wka_ports; 801 struct zfcp_fc_wka_ports *wka_ports;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index cb2a3669a384..0747b087390d 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -27,6 +27,8 @@
27#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ 27#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) 28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
29 29
30#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
31
30/** 32/**
31 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request 33 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
32 * @ct_hdr: FC GS common transport header 34 * @ct_hdr: FC GS common transport header
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 482dcd97aa5d..e8fb4d9baa8b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1068,20 +1068,20 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1068static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1068static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1069 struct scatterlist *sg_req, 1069 struct scatterlist *sg_req,
1070 struct scatterlist *sg_resp, 1070 struct scatterlist *sg_resp,
1071 int max_sbals) 1071 int max_sbals, unsigned int timeout)
1072{ 1072{
1073 int ret; 1073 int ret;
1074 unsigned int fcp_chan_timeout;
1075 1074
1076 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); 1075 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
1077 if (ret) 1076 if (ret)
1078 return ret; 1077 return ret;
1079 1078
1080 /* common settings for ct/gs and els requests */ 1079 /* common settings for ct/gs and els requests */
1081 fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000; 1080 if (timeout > 255)
1081 timeout = 255; /* max value accepted by hardware */
1082 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1082 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1083 req->qtcb->bottom.support.timeout = fcp_chan_timeout; 1083 req->qtcb->bottom.support.timeout = timeout;
1084 zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ); 1084 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1085 1085
1086 return 0; 1086 return 0;
1087} 1087}
@@ -1092,7 +1092,8 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1092 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1092 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1093 */ 1093 */
1094int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1094int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1095 struct zfcp_fsf_ct_els *ct, mempool_t *pool) 1095 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1096 unsigned int timeout)
1096{ 1097{
1097 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1098 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1098 struct zfcp_fsf_req *req; 1099 struct zfcp_fsf_req *req;
@@ -1111,7 +1112,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1111 1112
1112 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1113 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1113 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, 1114 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
1114 FSF_MAX_SBALS_PER_REQ); 1115 FSF_MAX_SBALS_PER_REQ, timeout);
1115 if (ret) 1116 if (ret)
1116 goto failed_send; 1117 goto failed_send;
1117 1118
@@ -1188,7 +1189,7 @@ skip_fsfstatus:
1188 * @els: pointer to struct zfcp_send_els with data for the command 1189 * @els: pointer to struct zfcp_send_els with data for the command
1189 */ 1190 */
1190int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1191int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1191 struct zfcp_fsf_ct_els *els) 1192 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1192{ 1193{
1193 struct zfcp_fsf_req *req; 1194 struct zfcp_fsf_req *req;
1194 struct zfcp_qdio *qdio = adapter->qdio; 1195 struct zfcp_qdio *qdio = adapter->qdio;
@@ -1206,7 +1207,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1206 } 1207 }
1207 1208
1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1209 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1209 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2); 1210 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);
1210 1211
1211 if (ret) 1212 if (ret)
1212 goto failed_send; 1213 goto failed_send;
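
The zfcp_fsf hunks above make the CT/ELS timeout a caller-supplied parameter (the fixed 2 * FC_DEF_R_A_TOV / 1000 value moves into the ZFCP_FC_CTELS_TMO define, and bsg jobs pass their own job timeout), clamp it to 255 before programming it into the request, and arm the software timer 10 seconds later as a backup. A minimal sketch of that clamp-then-arm pattern, with illustrative demo_* names:

#include <stdio.h>

/* A caller-supplied timeout (seconds) is clamped to the largest value the
 * hardware field accepts (255 per the diff), and a software watchdog is
 * armed slightly later so the channel reports the timeout first.
 * All names here are illustrative. */
struct demo_req {
	unsigned int hw_timeout;	/* value programmed into the request */
	unsigned int sw_timer_secs;	/* backup software timer */
};

static void demo_setup_timeout(struct demo_req *req, unsigned int timeout)
{
	if (timeout > 255)
		timeout = 255;
	req->hw_timeout = timeout;
	req->sw_timer_secs = timeout + 10;
}

int main(void)
{
	struct demo_req req;

	demo_setup_timeout(&req, 300);
	printf("hw=%u sw=%u\n", req.hw_timeout, req.sw_timer_secs);
	return 0;
}
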
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 771cc536a989..8e6fc68d6bd4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -652,6 +652,7 @@ struct fc_function_template zfcp_transport_functions = {
652 .show_host_port_state = 1, 652 .show_host_port_state = 1,
653 .show_host_active_fc4s = 1, 653 .show_host_active_fc4s = 1,
654 .bsg_request = zfcp_fc_exec_bsg_job, 654 .bsg_request = zfcp_fc_exec_bsg_job,
655 .bsg_timeout = zfcp_fc_timeout_bsg_job,
655 /* no functions registered for following dynamic attributes but 656 /* no functions registered for following dynamic attributes but
656 directly set by LLDD */ 657 directly set by LLDD */
657 .show_host_port_type = 1, 658 .show_host_port_type = 1,
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 7c815d3327f7..28d86f9df83c 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -522,6 +522,40 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct of_device *op,
522 set_fan_speeds(fp); 522 set_fan_speeds(fp);
523} 523}
524 524
525static void destroy_one_temp(struct bbc_cpu_temperature *tp)
526{
527 bbc_i2c_detach(tp->client);
528 kfree(tp);
529}
530
531static void destroy_all_temps(struct bbc_i2c_bus *bp)
532{
533 struct bbc_cpu_temperature *tp, *tpos;
534
535 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
536 list_del(&tp->bp_list);
537 list_del(&tp->glob_list);
538 destroy_one_temp(tp);
539 }
540}
541
542static void destroy_one_fan(struct bbc_fan_control *fp)
543{
544 bbc_i2c_detach(fp->client);
545 kfree(fp);
546}
547
548static void destroy_all_fans(struct bbc_i2c_bus *bp)
549{
550 struct bbc_fan_control *fp, *fpos;
551
552 list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
553 list_del(&fp->bp_list);
554 list_del(&fp->glob_list);
555 destroy_one_fan(fp);
556 }
557}
558
525int bbc_envctrl_init(struct bbc_i2c_bus *bp) 559int bbc_envctrl_init(struct bbc_i2c_bus *bp)
526{ 560{
527 struct of_device *op; 561 struct of_device *op;
@@ -541,6 +575,8 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
541 int err = PTR_ERR(kenvctrld_task); 575 int err = PTR_ERR(kenvctrld_task);
542 576
543 kenvctrld_task = NULL; 577 kenvctrld_task = NULL;
578 destroy_all_temps(bp);
579 destroy_all_fans(bp);
544 return err; 580 return err;
545 } 581 }
546 } 582 }
@@ -548,35 +584,11 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
548 return 0; 584 return 0;
549} 585}
550 586
551static void destroy_one_temp(struct bbc_cpu_temperature *tp)
552{
553 bbc_i2c_detach(tp->client);
554 kfree(tp);
555}
556
557static void destroy_one_fan(struct bbc_fan_control *fp)
558{
559 bbc_i2c_detach(fp->client);
560 kfree(fp);
561}
562
563void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) 587void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp)
564{ 588{
565 struct bbc_cpu_temperature *tp, *tpos;
566 struct bbc_fan_control *fp, *fpos;
567
568 if (kenvctrld_task) 589 if (kenvctrld_task)
569 kthread_stop(kenvctrld_task); 590 kthread_stop(kenvctrld_task);
570 591
571 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { 592 destroy_all_temps(bp);
572 list_del(&tp->bp_list); 593 destroy_all_fans(bp);
573 list_del(&tp->glob_list);
574 destroy_one_temp(tp);
575 }
576
577 list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
578 list_del(&fp->bp_list);
579 list_del(&fp->glob_list);
580 destroy_one_fan(fp);
581 }
582} 594}
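
The bbc_envctrl hunks above move the destroy helpers (and the new destroy_all_temps()/destroy_all_fans() wrappers) ahead of bbc_envctrl_init() so the kthread_run() error path can release every sensor already attached; bbc_envctrl_cleanup() then reuses the same wrappers. A minimal userspace sketch of that unwind-on-failure pattern, with illustrative demo_* names:

#include <stdlib.h>
#include <stdio.h>

/* Resources attached during init are released again if a later step fails,
 * using the same helper the regular cleanup path uses. Illustrative only. */
struct demo_sensor { struct demo_sensor *next; };

static struct demo_sensor *sensors;

static void demo_destroy_all(void)
{
	while (sensors) {
		struct demo_sensor *s = sensors;

		sensors = s->next;
		free(s);
	}
}

static int demo_start_thread(void) { return -1; }	/* pretend it fails */

static int demo_init(void)
{
	for (int i = 0; i < 3; i++) {
		struct demo_sensor *s = calloc(1, sizeof(*s));

		if (!s)
			break;
		s->next = sensors;
		sensors = s;
	}

	if (demo_start_thread() < 0) {
		demo_destroy_all();	/* undo the attach loop */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("init: %d, remaining sensors: %s\n",
	       demo_init(), sensors ? "yes" : "none");
	return 0;
}
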
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 2a889853a106..7e26ebc26661 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -293,7 +293,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
293 status = -EINVAL; 293 status = -EINVAL;
294 } 294 }
295 } 295 }
296 aac_fib_complete(fibptr); 296 /* Do not set XferState to zero unless receives a response from F/W */
297 if (status >= 0)
298 aac_fib_complete(fibptr);
299
297 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 300 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
298 if (status >= 0) { 301 if (status >= 0) {
299 if ((aac_commit == 1) || commit_flag) { 302 if ((aac_commit == 1) || commit_flag) {
@@ -310,13 +313,18 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
310 FsaNormal, 313 FsaNormal,
311 1, 1, 314 1, 1,
312 NULL, NULL); 315 NULL, NULL);
313 aac_fib_complete(fibptr); 316 /* Do not set XferState to zero unless
317 * receives a response from F/W */
318 if (status >= 0)
319 aac_fib_complete(fibptr);
314 } else if (aac_commit == 0) { 320 } else if (aac_commit == 0) {
315 printk(KERN_WARNING 321 printk(KERN_WARNING
316 "aac_get_config_status: Foreign device configurations are being ignored\n"); 322 "aac_get_config_status: Foreign device configurations are being ignored\n");
317 } 323 }
318 } 324 }
319 aac_fib_free(fibptr); 325 /* FIB should be freed only after getting the response from the F/W */
326 if (status != -ERESTARTSYS)
327 aac_fib_free(fibptr);
320 return status; 328 return status;
321} 329}
322 330
@@ -355,7 +363,9 @@ int aac_get_containers(struct aac_dev *dev)
355 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 363 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
356 aac_fib_complete(fibptr); 364 aac_fib_complete(fibptr);
357 } 365 }
358 aac_fib_free(fibptr); 366 /* FIB should be freed only after getting the response from the F/W */
367 if (status != -ERESTARTSYS)
368 aac_fib_free(fibptr);
359 369
360 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 370 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
361 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; 371 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
@@ -1245,8 +1255,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
1245 NULL); 1255 NULL);
1246 1256
1247 if (rcode < 0) { 1257 if (rcode < 0) {
1248 aac_fib_complete(fibptr); 1258 /* FIB should be freed only after
1249 aac_fib_free(fibptr); 1259 * getting the response from the F/W */
1260 if (rcode != -ERESTARTSYS) {
1261 aac_fib_complete(fibptr);
1262 aac_fib_free(fibptr);
1263 }
1250 return rcode; 1264 return rcode;
1251 } 1265 }
1252 memcpy(&dev->adapter_info, info, sizeof(*info)); 1266 memcpy(&dev->adapter_info, info, sizeof(*info));
@@ -1270,6 +1284,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
1270 1284
1271 if (rcode >= 0) 1285 if (rcode >= 0)
1272 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); 1286 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
1287 if (rcode == -ERESTARTSYS) {
1288 fibptr = aac_fib_alloc(dev);
1289 if (!fibptr)
1290 return -ENOMEM;
1291 }
1292
1273 } 1293 }
1274 1294
1275 1295
@@ -1470,9 +1490,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
1470 (dev->scsi_host_ptr->sg_tablesize * 8) + 112; 1490 (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
1471 } 1491 }
1472 } 1492 }
1473 1493 /* FIB should be freed only after getting the response from the F/W */
1474 aac_fib_complete(fibptr); 1494 if (rcode != -ERESTARTSYS) {
1475 aac_fib_free(fibptr); 1495 aac_fib_complete(fibptr);
1496 aac_fib_free(fibptr);
1497 }
1476 1498
1477 return rcode; 1499 return rcode;
1478} 1500}
@@ -1633,6 +1655,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
1633 * Alocate and initialize a Fib 1655 * Alocate and initialize a Fib
1634 */ 1656 */
1635 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1657 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1658 printk(KERN_WARNING "aac_read: fib allocation failed\n");
1636 return -1; 1659 return -1;
1637 } 1660 }
1638 1661
@@ -1712,9 +1735,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1712 * Allocate and initialize a Fib then setup a BlockWrite command 1735 * Allocate and initialize a Fib then setup a BlockWrite command
1713 */ 1736 */
1714 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1737 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1715 scsicmd->result = DID_ERROR << 16; 1738 /* FIB temporarily unavailable,not catastrophic failure */
1716 scsicmd->scsi_done(scsicmd); 1739
1717 return 0; 1740 /* scsicmd->result = DID_ERROR << 16;
1741 * scsicmd->scsi_done(scsicmd);
1742 * return 0;
1743 */
1744 printk(KERN_WARNING "aac_write: fib allocation failed\n");
1745 return -1;
1718 } 1746 }
1719 1747
1720 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); 1748 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 83986ed86556..619c02d9c862 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 2461 15# define AAC_DRIVER_BUILD 24702
16# define AAC_DRIVER_BRANCH "-ms" 16# define AAC_DRIVER_BRANCH "-ms"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
@@ -1036,6 +1036,9 @@ struct aac_dev
1036 u8 printf_enabled; 1036 u8 printf_enabled;
1037 u8 in_reset; 1037 u8 in_reset;
1038 u8 msi; 1038 u8 msi;
1039 int management_fib_count;
1040 spinlock_t manage_lock;
1041
1039}; 1042};
1040 1043
1041#define aac_adapter_interrupt(dev) \ 1044#define aac_adapter_interrupt(dev) \
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 0391d759dfdb..9c0c91178538 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -153,7 +153,7 @@ cleanup:
153 fibptr->hw_fib_pa = hw_fib_pa; 153 fibptr->hw_fib_pa = hw_fib_pa;
154 fibptr->hw_fib_va = hw_fib; 154 fibptr->hw_fib_va = hw_fib;
155 } 155 }
156 if (retval != -EINTR) 156 if (retval != -ERESTARTSYS)
157 aac_fib_free(fibptr); 157 aac_fib_free(fibptr);
158 return retval; 158 return retval;
159} 159}
@@ -322,7 +322,7 @@ return_fib:
322 } 322 }
323 if (f.wait) { 323 if (f.wait) {
324 if(down_interruptible(&fibctx->wait_sem) < 0) { 324 if(down_interruptible(&fibctx->wait_sem) < 0) {
325 status = -EINTR; 325 status = -ERESTARTSYS;
326 } else { 326 } else {
327 /* Lock again and retry */ 327 /* Lock again and retry */
328 spin_lock_irqsave(&dev->fib_lock, flags); 328 spin_lock_irqsave(&dev->fib_lock, flags);
@@ -593,10 +593,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
593 u64 addr; 593 u64 addr;
594 void* p; 594 void* p;
595 if (upsg->sg[i].count > 595 if (upsg->sg[i].count >
596 (dev->adapter_info.options & 596 ((dev->adapter_info.options &
597 AAC_OPT_NEW_COMM) ? 597 AAC_OPT_NEW_COMM) ?
598 (dev->scsi_host_ptr->max_sectors << 9) : 598 (dev->scsi_host_ptr->max_sectors << 9) :
599 65536) { 599 65536)) {
600 rcode = -EINVAL; 600 rcode = -EINVAL;
601 goto cleanup; 601 goto cleanup;
602 } 602 }
@@ -645,10 +645,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
645 u64 addr; 645 u64 addr;
646 void* p; 646 void* p;
647 if (usg->sg[i].count > 647 if (usg->sg[i].count >
648 (dev->adapter_info.options & 648 ((dev->adapter_info.options &
649 AAC_OPT_NEW_COMM) ? 649 AAC_OPT_NEW_COMM) ?
650 (dev->scsi_host_ptr->max_sectors << 9) : 650 (dev->scsi_host_ptr->max_sectors << 9) :
651 65536) { 651 65536)) {
652 rcode = -EINVAL; 652 rcode = -EINVAL;
653 goto cleanup; 653 goto cleanup;
654 } 654 }
@@ -695,10 +695,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
695 uintptr_t addr; 695 uintptr_t addr;
696 void* p; 696 void* p;
697 if (usg->sg[i].count > 697 if (usg->sg[i].count >
698 (dev->adapter_info.options & 698 ((dev->adapter_info.options &
699 AAC_OPT_NEW_COMM) ? 699 AAC_OPT_NEW_COMM) ?
700 (dev->scsi_host_ptr->max_sectors << 9) : 700 (dev->scsi_host_ptr->max_sectors << 9) :
701 65536) { 701 65536)) {
702 rcode = -EINVAL; 702 rcode = -EINVAL;
703 goto cleanup; 703 goto cleanup;
704 } 704 }
@@ -734,10 +734,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
734 dma_addr_t addr; 734 dma_addr_t addr;
735 void* p; 735 void* p;
736 if (upsg->sg[i].count > 736 if (upsg->sg[i].count >
737 (dev->adapter_info.options & 737 ((dev->adapter_info.options &
738 AAC_OPT_NEW_COMM) ? 738 AAC_OPT_NEW_COMM) ?
739 (dev->scsi_host_ptr->max_sectors << 9) : 739 (dev->scsi_host_ptr->max_sectors << 9) :
740 65536) { 740 65536)) {
741 rcode = -EINVAL; 741 rcode = -EINVAL;
742 goto cleanup; 742 goto cleanup;
743 } 743 }
@@ -772,8 +772,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
772 psg->count = cpu_to_le32(sg_indx+1); 772 psg->count = cpu_to_le32(sg_indx+1);
773 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 773 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
774 } 774 }
775 if (status == -EINTR) { 775 if (status == -ERESTARTSYS) {
776 rcode = -EINTR; 776 rcode = -ERESTARTSYS;
777 goto cleanup; 777 goto cleanup;
778 } 778 }
779 779
@@ -810,7 +810,7 @@ cleanup:
810 for(i=0; i <= sg_indx; i++){ 810 for(i=0; i <= sg_indx; i++){
811 kfree(sg_list[i]); 811 kfree(sg_list[i]);
812 } 812 }
813 if (rcode != -EINTR) { 813 if (rcode != -ERESTARTSYS) {
814 aac_fib_complete(srbfib); 814 aac_fib_complete(srbfib);
815 aac_fib_free(srbfib); 815 aac_fib_free(srbfib);
816 } 816 }
@@ -848,7 +848,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
848 */ 848 */
849 849
850 status = aac_dev_ioctl(dev, cmd, arg); 850 status = aac_dev_ioctl(dev, cmd, arg);
851 if(status != -ENOTTY) 851 if (status != -ENOTTY)
852 return status; 852 return status;
853 853
854 switch (cmd) { 854 switch (cmd) {
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 666d5151d628..a7261486ccd4 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -194,7 +194,9 @@ int aac_send_shutdown(struct aac_dev * dev)
194 194
195 if (status >= 0) 195 if (status >= 0)
196 aac_fib_complete(fibctx); 196 aac_fib_complete(fibctx);
197 aac_fib_free(fibctx); 197 /* FIB should be freed only after getting the response from the F/W */
198 if (status != -ERESTARTSYS)
199 aac_fib_free(fibctx);
198 return status; 200 return status;
199} 201}
200 202
@@ -304,6 +306,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
304 /* 306 /*
305 * Check the preferred comm settings, defaults from template. 307 * Check the preferred comm settings, defaults from template.
306 */ 308 */
309 dev->management_fib_count = 0;
310 spin_lock_init(&dev->manage_lock);
307 dev->max_fib_size = sizeof(struct hw_fib); 311 dev->max_fib_size = sizeof(struct hw_fib);
308 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size 312 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
309 - sizeof(struct aac_fibhdr) 313 - sizeof(struct aac_fibhdr)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 956261f25181..94d2954d79ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -189,7 +189,14 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
189 189
190void aac_fib_free(struct fib *fibptr) 190void aac_fib_free(struct fib *fibptr)
191{ 191{
192 unsigned long flags; 192 unsigned long flags, flagsv;
193
194 spin_lock_irqsave(&fibptr->event_lock, flagsv);
195 if (fibptr->done == 2) {
196 spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
197 return;
198 }
199 spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
193 200
194 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 201 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
195 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 202 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@@ -390,6 +397,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
390 struct hw_fib * hw_fib = fibptr->hw_fib_va; 397 struct hw_fib * hw_fib = fibptr->hw_fib_va;
391 unsigned long flags = 0; 398 unsigned long flags = 0;
392 unsigned long qflags; 399 unsigned long qflags;
400 unsigned long mflags = 0;
401
393 402
394 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) 403 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
395 return -EBUSY; 404 return -EBUSY;
@@ -471,9 +480,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
471 if (!dev->queues) 480 if (!dev->queues)
472 return -EBUSY; 481 return -EBUSY;
473 482
474 if(wait) 483 if (wait) {
484
485 spin_lock_irqsave(&dev->manage_lock, mflags);
486 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
487 printk(KERN_INFO "No management Fibs Available:%d\n",
488 dev->management_fib_count);
489 spin_unlock_irqrestore(&dev->manage_lock, mflags);
490 return -EBUSY;
491 }
492 dev->management_fib_count++;
493 spin_unlock_irqrestore(&dev->manage_lock, mflags);
475 spin_lock_irqsave(&fibptr->event_lock, flags); 494 spin_lock_irqsave(&fibptr->event_lock, flags);
476 aac_adapter_deliver(fibptr); 495 }
496
497 if (aac_adapter_deliver(fibptr) != 0) {
498 printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
499 if (wait) {
500 spin_unlock_irqrestore(&fibptr->event_lock, flags);
501 spin_lock_irqsave(&dev->manage_lock, mflags);
502 dev->management_fib_count--;
503 spin_unlock_irqrestore(&dev->manage_lock, mflags);
504 }
505 return -EBUSY;
506 }
507
477 508
478 /* 509 /*
479 * If the caller wanted us to wait for response wait now. 510 * If the caller wanted us to wait for response wait now.
@@ -516,14 +547,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
516 udelay(5); 547 udelay(5);
517 } 548 }
518 } else if (down_interruptible(&fibptr->event_wait)) { 549 } else if (down_interruptible(&fibptr->event_wait)) {
519 fibptr->done = 2; 550 /* Do nothing ... satisfy
520 up(&fibptr->event_wait); 551 * down_interruptible must_check */
521 } 552 }
553
522 spin_lock_irqsave(&fibptr->event_lock, flags); 554 spin_lock_irqsave(&fibptr->event_lock, flags);
523 if ((fibptr->done == 0) || (fibptr->done == 2)) { 555 if (fibptr->done == 0) {
524 fibptr->done = 2; /* Tell interrupt we aborted */ 556 fibptr->done = 2; /* Tell interrupt we aborted */
525 spin_unlock_irqrestore(&fibptr->event_lock, flags); 557 spin_unlock_irqrestore(&fibptr->event_lock, flags);
526 return -EINTR; 558 return -ERESTARTSYS;
527 } 559 }
528 spin_unlock_irqrestore(&fibptr->event_lock, flags); 560 spin_unlock_irqrestore(&fibptr->event_lock, flags);
529 BUG_ON(fibptr->done == 0); 561 BUG_ON(fibptr->done == 0);
@@ -689,6 +721,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
689 721
690int aac_fib_complete(struct fib *fibptr) 722int aac_fib_complete(struct fib *fibptr)
691{ 723{
724 unsigned long flags;
692 struct hw_fib * hw_fib = fibptr->hw_fib_va; 725 struct hw_fib * hw_fib = fibptr->hw_fib_va;
693 726
694 /* 727 /*
@@ -709,6 +742,13 @@ int aac_fib_complete(struct fib *fibptr)
709 * command is complete that we had sent to the adapter and this 742 * command is complete that we had sent to the adapter and this
710 * cdb could be reused. 743 * cdb could be reused.
711 */ 744 */
745 spin_lock_irqsave(&fibptr->event_lock, flags);
746 if (fibptr->done == 2) {
747 spin_unlock_irqrestore(&fibptr->event_lock, flags);
748 return 0;
749 }
750 spin_unlock_irqrestore(&fibptr->event_lock, flags);
751
712 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && 752 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
713 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) 753 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
714 { 754 {
@@ -1355,7 +1395,10 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
1355 1395
1356 if (status >= 0) 1396 if (status >= 0)
1357 aac_fib_complete(fibctx); 1397 aac_fib_complete(fibctx);
1358 aac_fib_free(fibctx); 1398 /* FIB should be freed only after getting
1399 * the response from the F/W */
1400 if (status != -ERESTARTSYS)
1401 aac_fib_free(fibctx);
1359 } 1402 }
1360 } 1403 }
1361 1404
@@ -1759,6 +1802,7 @@ int aac_command_thread(void *data)
1759 struct fib *fibptr; 1802 struct fib *fibptr;
1760 1803
1761 if ((fibptr = aac_fib_alloc(dev))) { 1804 if ((fibptr = aac_fib_alloc(dev))) {
1805 int status;
1762 __le32 *info; 1806 __le32 *info;
1763 1807
1764 aac_fib_init(fibptr); 1808 aac_fib_init(fibptr);
@@ -1769,15 +1813,21 @@ int aac_command_thread(void *data)
1769 1813
1770 *info = cpu_to_le32(now.tv_sec); 1814 *info = cpu_to_le32(now.tv_sec);
1771 1815
1772 (void)aac_fib_send(SendHostTime, 1816 status = aac_fib_send(SendHostTime,
1773 fibptr, 1817 fibptr,
1774 sizeof(*info), 1818 sizeof(*info),
1775 FsaNormal, 1819 FsaNormal,
1776 1, 1, 1820 1, 1,
1777 NULL, 1821 NULL,
1778 NULL); 1822 NULL);
1779 aac_fib_complete(fibptr); 1823 /* Do not set XferState to zero unless
1780 aac_fib_free(fibptr); 1824 * receives a response from F/W */
1825 if (status >= 0)
1826 aac_fib_complete(fibptr);
1827 /* FIB should be freed only after
1828 * getting the response from the F/W */
1829 if (status != -ERESTARTSYS)
1830 aac_fib_free(fibptr);
1781 } 1831 }
1782 difference = (long)(unsigned)update_interval*HZ; 1832 difference = (long)(unsigned)update_interval*HZ;
1783 } else { 1833 } else {
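
The aacraid commsup hunks above add dev->manage_lock and dev->management_fib_count so that waited-for management FIBs are counted at submission and refused with -EBUSY once AAC_NUM_MGT_FIB are outstanding; the response handlers in dpcsup.c below decrement the count when a reply arrives. A minimal userspace sketch of such a bounded in-flight counter, with an illustrative limit and demo_* names:

#include <pthread.h>
#include <stdio.h>

/* Submissions that will wait for a response are counted under a lock and
 * refused once a fixed limit of in-flight requests is reached; the
 * completion path drops the count again. DEMO_MAX and the demo_* names
 * are illustrative only. */
#define DEMO_MAX 2

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_in_flight;

static int demo_submit(void)
{
	pthread_mutex_lock(&demo_lock);
	if (demo_in_flight >= DEMO_MAX) {
		pthread_mutex_unlock(&demo_lock);
		return -1;			/* like returning -EBUSY */
	}
	demo_in_flight++;
	pthread_mutex_unlock(&demo_lock);
	return 0;
}

static void demo_complete(void)
{
	pthread_mutex_lock(&demo_lock);
	demo_in_flight--;
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	int a = demo_submit();
	int b = demo_submit();
	int c = demo_submit();		/* refused: limit reached */

	printf("%d %d %d\n", a, b, c);
	demo_complete();
	printf("after completion: %d\n", demo_submit());
	return 0;
}
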
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index abc9ef5d1b10..9c7408fe8c7d 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -57,9 +57,9 @@ unsigned int aac_response_normal(struct aac_queue * q)
57 struct hw_fib * hwfib; 57 struct hw_fib * hwfib;
58 struct fib * fib; 58 struct fib * fib;
59 int consumed = 0; 59 int consumed = 0;
60 unsigned long flags; 60 unsigned long flags, mflags;
61 61
62 spin_lock_irqsave(q->lock, flags); 62 spin_lock_irqsave(q->lock, flags);
63 /* 63 /*
64 * Keep pulling response QEs off the response queue and waking 64 * Keep pulling response QEs off the response queue and waking
65 * up the waiters until there are no more QEs. We then return 65 * up the waiters until there are no more QEs. We then return
@@ -125,12 +125,21 @@ unsigned int aac_response_normal(struct aac_queue * q)
125 } else { 125 } else {
126 unsigned long flagv; 126 unsigned long flagv;
127 spin_lock_irqsave(&fib->event_lock, flagv); 127 spin_lock_irqsave(&fib->event_lock, flagv);
128 if (!fib->done) 128 if (!fib->done) {
129 fib->done = 1; 129 fib->done = 1;
130 up(&fib->event_wait); 130 up(&fib->event_wait);
131 }
131 spin_unlock_irqrestore(&fib->event_lock, flagv); 132 spin_unlock_irqrestore(&fib->event_lock, flagv);
133
134 spin_lock_irqsave(&dev->manage_lock, mflags);
135 dev->management_fib_count--;
136 spin_unlock_irqrestore(&dev->manage_lock, mflags);
137
132 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 138 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
133 if (fib->done == 2) { 139 if (fib->done == 2) {
140 spin_lock_irqsave(&fib->event_lock, flagv);
141 fib->done = 0;
142 spin_unlock_irqrestore(&fib->event_lock, flagv);
134 aac_fib_complete(fib); 143 aac_fib_complete(fib);
135 aac_fib_free(fib); 144 aac_fib_free(fib);
136 } 145 }
@@ -232,6 +241,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
232 241
233unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) 242unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
234{ 243{
244 unsigned long mflags;
235 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); 245 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
236 if ((index & 0x00000002L)) { 246 if ((index & 0x00000002L)) {
237 struct hw_fib * hw_fib; 247 struct hw_fib * hw_fib;
@@ -320,11 +330,25 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
320 unsigned long flagv; 330 unsigned long flagv;
321 dprintk((KERN_INFO "event_wait up\n")); 331 dprintk((KERN_INFO "event_wait up\n"));
322 spin_lock_irqsave(&fib->event_lock, flagv); 332 spin_lock_irqsave(&fib->event_lock, flagv);
323 if (!fib->done) 333 if (!fib->done) {
324 fib->done = 1; 334 fib->done = 1;
325 up(&fib->event_wait); 335 up(&fib->event_wait);
336 }
326 spin_unlock_irqrestore(&fib->event_lock, flagv); 337 spin_unlock_irqrestore(&fib->event_lock, flagv);
338
339 spin_lock_irqsave(&dev->manage_lock, mflags);
340 dev->management_fib_count--;
341 spin_unlock_irqrestore(&dev->manage_lock, mflags);
342
327 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 343 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
344 if (fib->done == 2) {
345 spin_lock_irqsave(&fib->event_lock, flagv);
346 fib->done = 0;
347 spin_unlock_irqrestore(&fib->event_lock, flagv);
348 aac_fib_complete(fib);
349 aac_fib_free(fib);
350 }
351
328 } 352 }
329 return 0; 353 return 0;
330 } 354 }
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 4d419c155ce9..78971db5b60e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3171 tinfo->curr.transport_version = 2; 3171 tinfo->curr.transport_version = 2;
3172 tinfo->goal.transport_version = 2; 3172 tinfo->goal.transport_version = 2;
3173 tinfo->goal.ppr_options = 0; 3173 tinfo->goal.ppr_options = 0;
3174 /* 3174 if (scb != NULL) {
3175 * Remove any SCBs in the waiting for selection 3175 /*
3176 * queue that may also be for this target so 3176 * Remove any SCBs in the waiting
3177 * that command ordering is preserved. 3177 * for selection queue that may
3178 */ 3178 * also be for this target so that
3179 ahd_freeze_devq(ahd, scb); 3179 * command ordering is preserved.
3180 ahd_qinfifo_requeue_tail(ahd, scb); 3180 */
3181 ahd_freeze_devq(ahd, scb);
3182 ahd_qinfifo_requeue_tail(ahd, scb);
3183 }
3181 printerror = 0; 3184 printerror = 0;
3182 } 3185 }
3183 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) 3186 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
@@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3194 MSG_EXT_WDTR_BUS_8_BIT, 3197 MSG_EXT_WDTR_BUS_8_BIT,
3195 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3198 AHD_TRANS_CUR|AHD_TRANS_GOAL,
3196 /*paused*/TRUE); 3199 /*paused*/TRUE);
3197 /* 3200 if (scb != NULL) {
3198 * Remove any SCBs in the waiting for selection 3201 /*
3199 * queue that may also be for this target so that 3202 * Remove any SCBs in the waiting for
3200 * command ordering is preserved. 3203 * selection queue that may also be for
3201 */ 3204 * this target so that command ordering
3202 ahd_freeze_devq(ahd, scb); 3205 * is preserved.
3203 ahd_qinfifo_requeue_tail(ahd, scb); 3206 */
3207 ahd_freeze_devq(ahd, scb);
3208 ahd_qinfifo_requeue_tail(ahd, scb);
3209 }
3204 printerror = 0; 3210 printerror = 0;
3205 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) 3211 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
3206 && ppr_busfree == 0) { 3212 && ppr_busfree == 0) {
@@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3217 /*ppr_options*/0, 3223 /*ppr_options*/0,
3218 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3224 AHD_TRANS_CUR|AHD_TRANS_GOAL,
3219 /*paused*/TRUE); 3225 /*paused*/TRUE);
3220 /* 3226 if (scb != NULL) {
3221 * Remove any SCBs in the waiting for selection 3227 /*
3222 * queue that may also be for this target so that 3228 * Remove any SCBs in the waiting for
3223 * command ordering is preserved. 3229 * selection queue that may also be for
3224 */ 3230 * this target so that command ordering
3225 ahd_freeze_devq(ahd, scb); 3231 * is preserved.
3226 ahd_qinfifo_requeue_tail(ahd, scb); 3232 */
3233 ahd_freeze_devq(ahd, scb);
3234 ahd_qinfifo_requeue_tail(ahd, scb);
3235 }
3227 printerror = 0; 3236 printerror = 0;
3228 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 3237 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
3229 && ahd_sent_msg(ahd, AHDMSG_1B, 3238 && ahd_sent_msg(ahd, AHDMSG_1B,
@@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3251 * the message phases. We check it last in case we 3260 * the message phases. We check it last in case we
3252 * had to send some other message that caused a busfree. 3261 * had to send some other message that caused a busfree.
3253 */ 3262 */
3254 if (printerror != 0 3263 if (scb != NULL && printerror != 0
3255 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) 3264 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
3256 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { 3265 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
3257 3266
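
All three aic79xx hunks add the same guard: during a bus-free for a PPR/WDTR/SDTR negotiation there may be no SCB attached to the transaction, so the queue-freeze and requeue helpers must only run when scb is non-NULL. A stripped-down sketch of the guard; the two helpers are the driver's own, the wrapper and its placement are illustrative:

static void demo_requeue_if_present(struct ahd_softc *ahd, struct scb *scb)
{
        /* scb may legitimately be NULL on an expected bus-free */
        if (scb == NULL)
                return;

        ahd_freeze_devq(ahd, scb);
        ahd_qinfifo_requeue_tail(ahd, scb);
}
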
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 477542602284..9e71ac611146 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
2516 if (info->scsi.phase == PHASE_IDLE) 2516 if (info->scsi.phase == PHASE_IDLE)
2517 fas216_kick(info); 2517 fas216_kick(info);
2518 2518
2519 mod_timer(&info->eh_timer, 30 * HZ); 2519 mod_timer(&info->eh_timer, jiffies + 30 * HZ);
2520 spin_unlock_irqrestore(&info->host_lock, flags); 2520 spin_unlock_irqrestore(&info->host_lock, flags);
2521 2521
2522 /* 2522 /*
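
The fas216 fix above is the classic mod_timer() pitfall: the second argument is an absolute expiry in jiffies, so a bare "30 * HZ" is interpreted as an absolute tick count rather than a 30-second delay from now. A minimal sketch, assuming the timer has already been initialised elsewhere:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void demo_start_eh_timer(struct timer_list *eh_timer)
{
        /* absolute expiry: roughly 30 seconds from the current jiffies */
        mod_timer(eh_timer, jiffies + 30 * HZ);
}
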
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 26ffdcd5a437..15a00e8b7122 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn)
1440static int is_cxgb3_dev(struct net_device *dev) 1440static int is_cxgb3_dev(struct net_device *dev)
1441{ 1441{
1442 struct cxgb3i_sdev_data *cdata; 1442 struct cxgb3i_sdev_data *cdata;
1443 struct net_device *ndev = dev;
1444
1445 if (dev->priv_flags & IFF_802_1Q_VLAN)
1446 ndev = vlan_dev_real_dev(dev);
1443 1447
1444 write_lock(&cdata_rwlock); 1448 write_lock(&cdata_rwlock);
1445 list_for_each_entry(cdata, &cdata_list, list) { 1449 list_for_each_entry(cdata, &cdata_list, list) {
@@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev)
1447 int i; 1451 int i;
1448 1452
1449 for (i = 0; i < ports->nports; i++) 1453 for (i = 0; i < ports->nports; i++)
1450 if (dev == ports->lldevs[i]) { 1454 if (ndev == ports->lldevs[i]) {
1451 write_unlock(&cdata_rwlock); 1455 write_unlock(&cdata_rwlock);
1452 return 1; 1456 return 1;
1453 } 1457 }
@@ -1566,6 +1570,26 @@ out_err:
1566 return -EINVAL; 1570 return -EINVAL;
1567} 1571}
1568 1572
1573/**
1574 * cxgb3i_find_dev - find the interface associated with the given address
1575 * @ipaddr: ip address
1576 */
1577static struct net_device *
1578cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
1579{
1580 struct flowi fl;
1581 int err;
1582 struct rtable *rt;
1583
1584 memset(&fl, 0, sizeof(fl));
1585 fl.nl_u.ip4_u.daddr = ipaddr;
1586
1587 err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
1588 if (!err)
1589 return (&rt->u.dst)->dev;
1590
1591 return NULL;
1592}
1569 1593
1570/** 1594/**
1571 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address 1595 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
@@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1581 struct cxgb3i_sdev_data *cdata; 1605 struct cxgb3i_sdev_data *cdata;
1582 struct t3cdev *cdev; 1606 struct t3cdev *cdev;
1583 __be32 sipv4; 1607 __be32 sipv4;
1608 struct net_device *dstdev;
1584 int err; 1609 int err;
1585 1610
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); 1611 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
@@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1591 c3cn->daddr.sin_port = usin->sin_port; 1616 c3cn->daddr.sin_port = usin->sin_port;
1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1617 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1593 1618
1619 dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
1620 if (!dstdev || !is_cxgb3_dev(dstdev))
1621 return -ENETUNREACH;
1622
1623 if (dstdev->priv_flags & IFF_802_1Q_VLAN)
1624 dev = dstdev;
1625
1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, 1626 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1595 c3cn->daddr.sin_addr.s_addr, 1627 c3cn->daddr.sin_addr.s_addr,
1596 c3cn->saddr.sin_port, 1628 c3cn->saddr.sin_port,
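
Sketch of the VLAN handling the cxgb3i hunks introduce: when the iSCSI connection arrives over an 802.1Q interface, is_cxgb3_dev() must compare against the underlying real device, not the VLAN device itself. The wrapper name below is illustrative; vlan_dev_real_dev() is the standard kernel helper used in the diff:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static struct net_device *demo_underlying_dev(struct net_device *dev)
{
        if (dev->priv_flags & IFF_802_1Q_VLAN)
                return vlan_dev_real_dev(dev);
        return dev;
}
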
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ce522702a6c1..2cc39684ce97 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4142 spin_lock_irq(shost->host_lock); 4142 spin_lock_irq(shost->host_lock);
4143 if (vport->fc_rscn_flush) { 4143 if (vport->fc_rscn_flush) {
4144 /* Another thread is walking fc_rscn_id_list on this vport */ 4144 /* Another thread is walking fc_rscn_id_list on this vport */
4145 spin_unlock_irq(shost->host_lock);
4146 vport->fc_flag |= FC_RSCN_DISCOVERY; 4145 vport->fc_flag |= FC_RSCN_DISCOVERY;
4146 spin_unlock_irq(shost->host_lock);
4147 /* Send back ACC */ 4147 /* Send back ACC */
4148 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4148 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4149 return 0; 4149 return 0;
@@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5948 lpfc_initial_fdisc(vport); 5948 lpfc_initial_fdisc(vport);
5949 break; 5949 break;
5950 } 5950 }
5951
5952 } else { 5951 } else {
5952 vport->vpi_state |= LPFC_VPI_REGISTERED;
5953 if (vport == phba->pport) 5953 if (vport == phba->pport)
5954 if (phba->sli_rev < LPFC_SLI_REV4) 5954 if (phba->sli_rev < LPFC_SLI_REV4)
5955 lpfc_issue_fabric_reglogin(vport); 5955 lpfc_issue_fabric_reglogin(vport);
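
The lpfc_els.c RSCN hunk re-orders the path so the fc_flag update happens before host_lock is released; otherwise another CPU could observe the flag word without FC_RSCN_DISCOVERY set. A minimal illustration of "publish shared state while still holding the lock" (the lock, word and bit below are placeholders, not the driver's fields):

#include <linux/spinlock.h>

static void demo_set_flag_locked(spinlock_t *lock,
                                 unsigned long *flag_word,
                                 unsigned long bit)
{
        spin_lock_irq(lock);
        *flag_word |= bit;      /* visible to other CPUs only in a consistent state */
        spin_unlock_irq(lock);
}
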
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3b9424427652..2445e399fd60 100755..100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba)
747 747
748 if (phba->link_state == LPFC_LINK_DOWN) 748 if (phba->link_state == LPFC_LINK_DOWN)
749 return 0; 749 return 0;
750
751 /* Block all SCSI stack I/Os */
752 lpfc_scsi_dev_block(phba);
753
750 spin_lock_irq(&phba->hbalock); 754 spin_lock_irq(&phba->hbalock);
751 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); 755 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
752 if (phba->link_state > LPFC_LINK_DOWN) { 756 if (phba->link_state > LPFC_LINK_DOWN) {
@@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1555 * to book keeping the FCFIs can be used. 1559 * to book keeping the FCFIs can be used.
1556 */ 1560 */
1557 if (shdr_status || shdr_add_status) { 1561 if (shdr_status || shdr_add_status) {
1558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1562 if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
1559 "2521 READ_FCF_RECORD mailbox failed " 1563 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1560 "with status x%x add_status x%x, mbx\n", 1564 "2726 READ_FCF_RECORD Indicates empty "
1561 shdr_status, shdr_add_status); 1565 "FCF table.\n");
1566 } else {
1567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1568 "2521 READ_FCF_RECORD mailbox failed "
1569 "with status x%x add_status x%x, mbx\n",
1570 shdr_status, shdr_add_status);
1571 }
1562 goto out; 1572 goto out;
1563 } 1573 }
1564 /* Interpreting the returned information of FCF records */ 1574 /* Interpreting the returned information of FCF records */
@@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1698 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1708 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1699 return; 1709 return;
1700 } 1710 }
1711 spin_lock_irq(&phba->hbalock);
1701 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 1712 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
1713 spin_unlock_irq(&phba->hbalock);
1702 1714
1703 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 1715 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1704 lpfc_initial_fdisc(vport); 1716 lpfc_initial_fdisc(vport);
@@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2259 mb->mbxStatus); 2271 mb->mbxStatus);
2260 break; 2272 break;
2261 } 2273 }
2274 spin_lock_irq(&phba->hbalock);
2262 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 2275 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
2276 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2277 spin_unlock_irq(&phba->hbalock);
2263 vport->unreg_vpi_cmpl = VPORT_OK; 2278 vport->unreg_vpi_cmpl = VPORT_OK;
2264 mempool_free(pmb, phba->mbox_mem_pool); 2279 mempool_free(pmb, phba->mbox_mem_pool);
2265 /* 2280 /*
@@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4475 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) 4490 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4476 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4491 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4477 lpfc_mbx_unreg_vpi(vports[i]); 4492 lpfc_mbx_unreg_vpi(vports[i]);
4493 spin_lock_irq(&phba->hbalock);
4478 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 4494 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4479 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 4495 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
4496 spin_unlock_irq(&phba->hbalock);
4480 } 4497 }
4481 lpfc_destroy_vport_work_array(phba, vports); 4498 lpfc_destroy_vport_work_array(phba, vports);
4482 4499
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1585148a17e5..8a2a1c5935c6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy {
1013}; 1013};
1014 1014
1015#define LPFC_HDR_BUF_SIZE 128 1015#define LPFC_HDR_BUF_SIZE 128
1016#define LPFC_DATA_BUF_SIZE 4096 1016#define LPFC_DATA_BUF_SIZE 2048
1017struct rq_context { 1017struct rq_context {
1018 uint32_t word0; 1018 uint32_t word0;
1019#define lpfc_rq_context_rq_size_SHIFT 16 1019#define lpfc_rq_context_rq_size_SHIFT 16
@@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg {
1371#define STATUS_ERROR_ACITMAIN 0x2a 1371#define STATUS_ERROR_ACITMAIN 0x2a
1372#define STATUS_REBOOT_REQUIRED 0x2c 1372#define STATUS_REBOOT_REQUIRED 0x2c
1373#define STATUS_FCF_IN_USE 0x3a 1373#define STATUS_FCF_IN_USE 0x3a
1374#define STATUS_FCF_TABLE_EMPTY 0x43
1374 1375
1375struct lpfc_mbx_sli4_config { 1376struct lpfc_mbx_sli4_config {
1376 struct mbox_header header; 1377 struct mbox_header header;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d4da6bdd0e73..b8eb1b6e5e77 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3006 struct lpfc_vport *vport; 3006 struct lpfc_vport *vport;
3007 struct lpfc_nodelist *ndlp; 3007 struct lpfc_nodelist *ndlp;
3008 struct Scsi_Host *shost; 3008 struct Scsi_Host *shost;
3009 uint32_t link_state;
3009 3010
3010 phba->fc_eventTag = acqe_fcoe->event_tag; 3011 phba->fc_eventTag = acqe_fcoe->event_tag;
3011 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3012 phba->fcoe_eventtag = acqe_fcoe->event_tag;
@@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3052 break; 3053 break;
3053 /* 3054 /*
3054 * Currently, driver support only one FCF - so treat this as 3055 * Currently, driver support only one FCF - so treat this as
3055 * a link down. 3056 * a link down, but save the link state because we don't want
3057 * it to be changed to Link Down unless it is already down.
3056 */ 3058 */
3059 link_state = phba->link_state;
3057 lpfc_linkdown(phba); 3060 lpfc_linkdown(phba);
3061 phba->link_state = link_state;
3058 /* Unregister FCF if no devices connected to it */ 3062 /* Unregister FCF if no devices connected to it */
3059 lpfc_unregister_unused_fcf(phba); 3063 lpfc_unregister_unused_fcf(phba);
3060 break; 3064 break;
@@ -7226,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7226{ 7230{
7227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7228 "2711 PCI channel permanent disable for failure\n"); 7232 "2711 PCI channel permanent disable for failure\n");
7229 /* Block all SCSI devices' I/Os on the host */
7230 lpfc_scsi_dev_block(phba);
7231 /* Clean up all driver's outstanding SCSI I/Os */ 7233 /* Clean up all driver's outstanding SCSI I/Os */
7232 lpfc_sli_flush_fcp_rings(phba); 7234 lpfc_sli_flush_fcp_rings(phba);
7233} 7235}
@@ -7256,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7256 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7258 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7257 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7259 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7258 7260
7261 /* Block all SCSI devices' I/Os on the host */
7262 lpfc_scsi_dev_block(phba);
7263
7259 switch (state) { 7264 switch (state) {
7260 case pci_channel_io_normal: 7265 case pci_channel_io_normal:
7261 /* Non-fatal error, prepare for recovery */ 7266 /* Non-fatal error, prepare for recovery */
@@ -7507,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7507 error = -ENODEV; 7512 error = -ENODEV;
7508 goto out_free_sysfs_attr; 7513 goto out_free_sysfs_attr;
7509 } 7514 }
7515 /* Default to single FCP EQ for non-MSI-X */
7516 if (phba->intr_type != MSIX)
7517 phba->cfg_fcp_eq_count = 1;
7510 /* Set up SLI-4 HBA */ 7518 /* Set up SLI-4 HBA */
7511 if (lpfc_sli4_hba_setup(phba)) { 7519 if (lpfc_sli4_hba_setup(phba)) {
7512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7935667b81a5..589549b2bf0e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1383/* HBQ for ELS and CT traffic. */ 1383/* HBQ for ELS and CT traffic. */
1384static struct lpfc_hbq_init lpfc_els_hbq = { 1384static struct lpfc_hbq_init lpfc_els_hbq = {
1385 .rn = 1, 1385 .rn = 1,
1386 .entry_count = 200, 1386 .entry_count = 256,
1387 .mask_count = 0, 1387 .mask_count = 0,
1388 .profile = 0, 1388 .profile = 0,
1389 .ring_mask = (1 << LPFC_ELS_RING), 1389 .ring_mask = (1 << LPFC_ELS_RING),
@@ -1482,8 +1482,11 @@ err:
1482int 1482int
1483lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1483lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1484{ 1484{
1485 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1485 if (phba->sli_rev == LPFC_SLI_REV4)
1486 lpfc_hbq_defs[qno]->add_count)); 1486 return 0;
1487 else
1488 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1489 lpfc_hbq_defs[qno]->add_count);
1487} 1490}
1488 1491
1489/** 1492/**
@@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1498static int 1501static int
1499lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1502lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1500{ 1503{
1501 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1504 if (phba->sli_rev == LPFC_SLI_REV4)
1502 lpfc_hbq_defs[qno]->init_count)); 1505 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1506 lpfc_hbq_defs[qno]->entry_count);
1507 else
1508 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1509 lpfc_hbq_defs[qno]->init_count);
1503} 1510}
1504 1511
1505/** 1512/**
@@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4110 if (rc) { 4117 if (rc) {
4111 dma_free_coherent(&phba->pcidev->dev, dma_size, 4118 dma_free_coherent(&phba->pcidev->dev, dma_size,
4112 dmabuf->virt, dmabuf->phys); 4119 dmabuf->virt, dmabuf->phys);
4120 kfree(dmabuf);
4113 return -EIO; 4121 return -EIO;
4114 } 4122 }
4115 4123
@@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5848 iocbq->iocb.un.ulpWord[3]); 5856 iocbq->iocb.un.ulpWord[3]);
5849 wqe->generic.word3 = 0; 5857 wqe->generic.word3 = 0;
5850 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); 5858 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5851 bf_set(wqe_xc, &wqe->generic, 1);
5852 /* The entire sequence is transmitted for this IOCB */ 5859 /* The entire sequence is transmitted for this IOCB */
5853 xmit_len = total_len; 5860 xmit_len = total_len;
5854 cmnd = CMD_XMIT_SEQUENCE64_CR; 5861 cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10944 return dmabuf; 10951 return dmabuf;
10945 } 10952 }
10946 temp_hdr = seq_dmabuf->hbuf.virt; 10953 temp_hdr = seq_dmabuf->hbuf.virt;
10947 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { 10954 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
10955 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
10948 list_del_init(&seq_dmabuf->hbuf.list); 10956 list_del_init(&seq_dmabuf->hbuf.list);
10949 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 10957 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10950 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 10958 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
@@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10955 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 10963 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
10956 seq_dmabuf->time_stamp = jiffies; 10964 seq_dmabuf->time_stamp = jiffies;
10957 lpfc_update_rcv_time_stamp(vport); 10965 lpfc_update_rcv_time_stamp(vport);
10966 if (list_empty(&seq_dmabuf->dbuf.list)) {
10967 temp_hdr = dmabuf->hbuf.virt;
10968 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
10969 return seq_dmabuf;
10970 }
10958 /* find the correct place in the sequence to insert this frame */ 10971 /* find the correct place in the sequence to insert this frame */
10959 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 10972 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10960 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 10973 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10963 * If the frame's sequence count is greater than the frame on 10976 * If the frame's sequence count is greater than the frame on
10964 * the list then insert the frame right after this frame 10977 * the list then insert the frame right after this frame
10965 */ 10978 */
10966 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { 10979 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
10980 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
10967 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 10981 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10968 return seq_dmabuf; 10982 return seq_dmabuf;
10969 } 10983 }
@@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
11210 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11224 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
11211 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11225 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11212 /* If there is a hole in the sequence count then fail. */ 11226 /* If there is a hole in the sequence count then fail. */
11213 if (++seq_count != hdr->fh_seq_cnt) 11227 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
11214 return 0; 11228 return 0;
11215 fctl = (hdr->fh_f_ctl[0] << 16 | 11229 fctl = (hdr->fh_f_ctl[0] << 16 |
11216 hdr->fh_f_ctl[1] << 8 | 11230 hdr->fh_f_ctl[1] << 8 |
@@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11242 struct lpfc_iocbq *first_iocbq, *iocbq; 11256 struct lpfc_iocbq *first_iocbq, *iocbq;
11243 struct fc_frame_header *fc_hdr; 11257 struct fc_frame_header *fc_hdr;
11244 uint32_t sid; 11258 uint32_t sid;
11259 struct ulp_bde64 *pbde;
11245 11260
11246 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11261 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11247 /* remove from receive buffer list */ 11262 /* remove from receive buffer list */
@@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11283 if (!iocbq->context3) { 11298 if (!iocbq->context3) {
11284 iocbq->context3 = d_buf; 11299 iocbq->context3 = d_buf;
11285 iocbq->iocb.ulpBdeCount++; 11300 iocbq->iocb.ulpBdeCount++;
11286 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = 11301 pbde = (struct ulp_bde64 *)
11287 LPFC_DATA_BUF_SIZE; 11302 &iocbq->iocb.unsli3.sli3Words[4];
11303 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
11288 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11304 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
11289 bf_get(lpfc_rcqe_length, 11305 bf_get(lpfc_rcqe_length,
11290 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11306 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
@@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11401 return; 11417 return;
11402 } 11418 }
11403 /* If not last frame in sequence continue processing frames. */ 11419 /* If not last frame in sequence continue processing frames. */
11404 if (!lpfc_seq_complete(seq_dmabuf)) { 11420 if (!lpfc_seq_complete(seq_dmabuf))
11405 /*
11406 * When saving off frames post a new one and mark this
11407 * frame to be freed when it is finished.
11408 **/
11409 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
11410 dmabuf->tag = -1;
11411 return; 11421 return;
11412 } 11422
11413 /* Send the complete sequence to the upper layer protocol */ 11423 /* Send the complete sequence to the upper layer protocol */
11414 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 11424 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
11415} 11425}
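
The lpfc_sli.c sequence-reassembly hunks wrap fh_seq_cnt in be16_to_cpu(): Fibre Channel header fields are big-endian on the wire, so they must be byte-swapped before any arithmetic comparison on little-endian hosts. A self-contained sketch of the comparison; the struct and helper names are illustrative:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_fc_hdr {
        __be16 fh_seq_cnt;      /* big-endian, as received from the fabric */
};

static bool demo_seq_before(const struct demo_fc_hdr *a,
                            const struct demo_fc_hdr *b)
{
        return be16_to_cpu(a->fh_seq_cnt) < be16_to_cpu(b->fh_seq_cnt);
}
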
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 25d66d070cf8..44e5f574236b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -28,7 +28,7 @@
28/* Multi-queue arrangement for fast-path FCP work queues */ 28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8 29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1 30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1 31#define LPFC_FP_EQN_DEF 4
32#define LPFC_FP_EQN_MIN 1 32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) 33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34 34
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c7f3aed2aab8..792f72263f1a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.6" 21#define LPFC_DRIVER_VERSION "8.3.7"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 7d6dd83d3592..e3c7fa642306 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport)
512 return VPORT_OK; 512 return VPORT_OK;
513 } 513 }
514 514
515 spin_lock_irq(&phba->hbalock);
515 vport->load_flag |= FC_LOADING; 516 vport->load_flag |= FC_LOADING;
516 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 517 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
518 spin_unlock_irq(&phba->hbalock);
517 519
518 /* Use the Physical nodes Fabric NDLP to determine if the link is 520 /* Use the Physical nodes Fabric NDLP to determine if the link is
519 * up and ready to FDISC. 521 * up and ready to FDISC.
@@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
700 } 702 }
701 spin_unlock_irq(&phba->ndlp_lock); 703 spin_unlock_irq(&phba->ndlp_lock);
702 } 704 }
703 if (vport->vpi_state != LPFC_VPI_REGISTERED) 705 if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
704 goto skip_logo; 706 goto skip_logo;
705 vport->unreg_vpi_cmpl = VPORT_INVAL; 707 vport->unreg_vpi_cmpl = VPORT_INVAL;
706 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 708 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
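
The lpfc_vport.c change replaces an equality test on vpi_state with a bit test: once additional state bits can be set in the same word, "vpi_state != LPFC_VPI_REGISTERED" wrongly reports the VPI as unregistered. Illustrative flag values only:

#define DEMO_VPI_REGISTERED     0x1
#define DEMO_VPI_NEEDS_REG      0x2

static int demo_vpi_is_registered(unsigned int vpi_state)
{
        /* a bitwise test stays correct when other bits are also set */
        return (vpi_state & DEMO_VPI_REGISTERED) != 0;
}
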
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 99ff99e45bee..708ea3157b60 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -4046,7 +4046,7 @@ megasas_aen_polling(struct work_struct *work)
4046} 4046}
4047 4047
4048 4048
4049static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, 4049static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
4050 megasas_sysfs_show_poll_mode_io, 4050 megasas_sysfs_show_poll_mode_io,
4051 megasas_sysfs_set_poll_mode_io); 4051 megasas_sysfs_set_poll_mode_io);
4052 4052
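
The megaraid_sas hunk tightens the driver attribute from world-writable (S_IWUGO) to owner-writable (S_IWUSR), the usual fix for sysfs permission warnings. A hedged sketch of the same pattern with a hypothetical attribute and trivial show/store helpers:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static int demo_mode;

static ssize_t demo_show(struct device_driver *dd, char *buf)
{
        return sprintf(buf, "%d\n", demo_mode);
}

static ssize_t demo_store(struct device_driver *dd, const char *buf,
                          size_t count)
{
        if (sscanf(buf, "%d", &demo_mode) != 1)
                return -EINVAL;
        return count;
}

/* readable by everyone, writable only by root */
static DRIVER_ATTR(demo_mode, S_IRUGO | S_IWUSR, demo_show, demo_store);
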
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e7d2688fbeba..b6f1ef954af1 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2483,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2483 sense_copied = 1; 2483 sense_copied = 1;
2484 } 2484 }
2485 2485
2486 if (RES_IS_GSCSI(res->cfg_entry)) { 2486 if (RES_IS_GSCSI(res->cfg_entry))
2487 pmcraid_cancel_all(cmd, sense_copied); 2487 pmcraid_cancel_all(cmd, sense_copied);
2488 } else if (sense_copied) { 2488 else if (sense_copied)
2489 pmcraid_erp_done(cmd); 2489 pmcraid_erp_done(cmd);
2490 return 0; 2490 else
2491 } else {
2492 pmcraid_request_sense(cmd); 2491 pmcraid_request_sense(cmd);
2493 }
2494 2492
2495 return 1; 2493 return 1;
2496 2494
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 21e2bc4d7401..3a9f5b288aee 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
232 if (off) 232 if (off)
233 return 0; 233 return 0;
234 234
235 if (unlikely(pci_channel_offline(ha->pdev)))
236 return 0;
237
235 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) 238 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
236 return -EINVAL; 239 return -EINVAL;
237 if (start > ha->optrom_size) 240 if (start > ha->optrom_size)
@@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
379 struct device, kobj))); 382 struct device, kobj)));
380 struct qla_hw_data *ha = vha->hw; 383 struct qla_hw_data *ha = vha->hw;
381 384
385 if (unlikely(pci_channel_offline(ha->pdev)))
386 return 0;
387
382 if (!capable(CAP_SYS_ADMIN)) 388 if (!capable(CAP_SYS_ADMIN))
383 return 0; 389 return 0;
384 390
@@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
398 struct qla_hw_data *ha = vha->hw; 404 struct qla_hw_data *ha = vha->hw;
399 uint8_t *tmp_data; 405 uint8_t *tmp_data;
400 406
407 if (unlikely(pci_channel_offline(ha->pdev)))
408 return 0;
409
401 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || 410 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
402 !ha->isp_ops->write_nvram) 411 !ha->isp_ops->write_nvram)
403 return 0; 412 return 0;
@@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf) 1247 char *buf)
1239{ 1248{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1249 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval; 1250 int rval = QLA_FUNCTION_FAILED;
1242 uint16_t state[5]; 1251 uint16_t state[5];
1243 1252
1244 rval = qla2x00_get_firmware_state(vha, state); 1253 if (!vha->hw->flags.eeh_busy)
1254 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS) 1255 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state)); 1256 memset(state, -1, sizeof(state));
1247 1257
@@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1452 if (!fcport) 1462 if (!fcport)
1453 return; 1463 return;
1454 1464
1455 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) 1465 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1466 return;
1467
1468 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1456 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1469 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1457 else 1470 return;
1458 qla2x00_abort_fcport_cmds(fcport); 1471 }
1459 1472
1460 /* 1473 /*
1461 * Transport has effectively 'deleted' the rport, clear 1474 * Transport has effectively 'deleted' the rport, clear
@@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1475 if (!fcport) 1488 if (!fcport)
1476 return; 1489 return;
1477 1490
1491 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1492 return;
1493
1478 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { 1494 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1479 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1495 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1480 return; 1496 return;
@@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1515 pfc_host_stat = &ha->fc_host_stat; 1531 pfc_host_stat = &ha->fc_host_stat;
1516 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 1532 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1517 1533
1534 if (test_bit(UNLOADING, &vha->dpc_flags))
1535 goto done;
1536
1537 if (unlikely(pci_channel_offline(ha->pdev)))
1538 goto done;
1539
1518 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1540 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1519 if (stats == NULL) { 1541 if (stats == NULL) {
1520 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1542 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f660dd70b72e..d6d9c86cb058 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -26,7 +26,7 @@
26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ 26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ 27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ 28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */ 29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30 30
31/* 31/*
32* Macros use for debugging the driver. 32* Macros use for debugging the driver.
@@ -132,6 +132,13 @@
132#else 132#else
133#define DEBUG16(x) do {} while (0) 133#define DEBUG16(x) do {} while (0)
134#endif 134#endif
135
136#if defined(QL_DEBUG_LEVEL_17)
137#define DEBUG17(x) do {x;} while (0)
138#else
139#define DEBUG17(x) do {} while (0)
140#endif
141
135/* 142/*
136 * Firmware Dump structure definition 143 * Firmware Dump structure definition
137 */ 144 */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 384afda7dbe9..1263d9796e89 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1586,8 +1586,7 @@ typedef struct fc_port {
1586 */ 1586 */
1587#define FCF_FABRIC_DEVICE BIT_0 1587#define FCF_FABRIC_DEVICE BIT_0
1588#define FCF_LOGIN_NEEDED BIT_1 1588#define FCF_LOGIN_NEEDED BIT_1
1589#define FCF_TAPE_PRESENT BIT_2 1589#define FCF_FCP2_DEVICE BIT_2
1590#define FCF_FCP2_DEVICE BIT_3
1591 1590
1592/* No loop ID flag. */ 1591/* No loop ID flag. */
1593#define FC_NO_LOOP_ID 0x1000 1592#define FC_NO_LOOP_ID 0x1000
@@ -2256,11 +2255,13 @@ struct qla_hw_data {
2256 uint32_t disable_serdes :1; 2255 uint32_t disable_serdes :1;
2257 uint32_t gpsc_supported :1; 2256 uint32_t gpsc_supported :1;
2258 uint32_t npiv_supported :1; 2257 uint32_t npiv_supported :1;
2258 uint32_t pci_channel_io_perm_failure :1;
2259 uint32_t fce_enabled :1; 2259 uint32_t fce_enabled :1;
2260 uint32_t fac_supported :1; 2260 uint32_t fac_supported :1;
2261 uint32_t chip_reset_done :1; 2261 uint32_t chip_reset_done :1;
2262 uint32_t port0 :1; 2262 uint32_t port0 :1;
2263 uint32_t running_gold_fw :1; 2263 uint32_t running_gold_fw :1;
2264 uint32_t eeh_busy :1;
2264 uint32_t cpu_affinity_enabled :1; 2265 uint32_t cpu_affinity_enabled :1;
2265 uint32_t disable_msix_handshake :1; 2266 uint32_t disable_msix_handshake :1;
2266 } flags; 2267 } flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b6801fc6389..8bc6f53691e9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -324,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
324extern int 324extern int
325qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); 325qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
326 326
327extern int qla2x00_get_data_rate(scsi_qla_host_t *);
327/* 328/*
328 * Global Function Prototypes in qla_isr.c source file. 329 * Global Function Prototypes in qla_isr.c source file.
329 */ 330 */
@@ -452,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
452extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
453extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
454extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
455extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
456 456
457#endif /* _QLA_GBL_H */ 457#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 73a793539d45..3f8e8495b743 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -205,7 +205,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
205 205
206 switch (data[0]) { 206 switch (data[0]) {
207 case MBS_COMMAND_COMPLETE: 207 case MBS_COMMAND_COMPLETE:
208 if (fcport->flags & FCF_TAPE_PRESENT) 208 if (fcport->flags & FCF_FCP2_DEVICE)
209 opts |= BIT_1; 209 opts |= BIT_1;
210 rval = qla2x00_get_port_database(vha, fcport, opts); 210 rval = qla2x00_get_port_database(vha, fcport, opts);
211 if (rval != QLA_SUCCESS) 211 if (rval != QLA_SUCCESS)
@@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
269 vha->flags.online = 0; 269 vha->flags.online = 0;
270 ha->flags.chip_reset_done = 0; 270 ha->flags.chip_reset_done = 0;
271 vha->flags.reset_active = 0; 271 vha->flags.reset_active = 0;
272 ha->flags.pci_channel_io_perm_failure = 0;
273 ha->flags.eeh_busy = 0;
272 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 274 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
273 atomic_set(&vha->loop_state, LOOP_DOWN); 275 atomic_set(&vha->loop_state, LOOP_DOWN);
274 vha->device_flags = DFLG_NO_CABLE; 276 vha->device_flags = DFLG_NO_CABLE;
@@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
581 uint32_t cnt; 583 uint32_t cnt;
582 uint16_t cmd; 584 uint16_t cmd;
583 585
586 if (unlikely(pci_channel_offline(ha->pdev)))
587 return;
588
584 ha->isp_ops->disable_intrs(ha); 589 ha->isp_ops->disable_intrs(ha);
585 590
586 spin_lock_irqsave(&ha->hardware_lock, flags); 591 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -786,6 +791,12 @@ void
786qla24xx_reset_chip(scsi_qla_host_t *vha) 791qla24xx_reset_chip(scsi_qla_host_t *vha)
787{ 792{
788 struct qla_hw_data *ha = vha->hw; 793 struct qla_hw_data *ha = vha->hw;
794
795 if (pci_channel_offline(ha->pdev) &&
796 ha->flags.pci_channel_io_perm_failure) {
797 return;
798 }
799
789 ha->isp_ops->disable_intrs(ha); 800 ha->isp_ops->disable_intrs(ha);
790 801
791 /* Perform RISC reset. */ 802 /* Perform RISC reset. */
@@ -2266,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2266 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2277 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2267 clear_bit(RSCN_UPDATE, &vha->dpc_flags); 2278 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2268 2279
2280 qla2x00_get_data_rate(vha);
2281
2269 /* Determine what we need to do */ 2282 /* Determine what we need to do */
2270 if (ha->current_topology == ISP_CFG_FL && 2283 if (ha->current_topology == ISP_CFG_FL &&
2271 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2284 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
@@ -2713,7 +2726,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2713 2726
2714 /* 2727 /*
2715 * Logout all previous fabric devices marked lost, except 2728 * Logout all previous fabric devices marked lost, except
2716 * tape devices. 2729 * FCP2 devices.
2717 */ 2730 */
2718 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2731 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2719 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2732 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
@@ -2726,7 +2739,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2726 qla2x00_mark_device_lost(vha, fcport, 2739 qla2x00_mark_device_lost(vha, fcport,
2727 ql2xplogiabsentdevice, 0); 2740 ql2xplogiabsentdevice, 0);
2728 if (fcport->loop_id != FC_NO_LOOP_ID && 2741 if (fcport->loop_id != FC_NO_LOOP_ID &&
2729 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2742 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
2730 fcport->port_type != FCT_INITIATOR && 2743 fcport->port_type != FCT_INITIATOR &&
2731 fcport->port_type != FCT_BROADCAST) { 2744 fcport->port_type != FCT_BROADCAST) {
2732 ha->isp_ops->fabric_logout(vha, 2745 ha->isp_ops->fabric_logout(vha,
@@ -3005,7 +3018,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3005 fcport->d_id.b24 = new_fcport->d_id.b24; 3018 fcport->d_id.b24 = new_fcport->d_id.b24;
3006 fcport->flags |= FCF_LOGIN_NEEDED; 3019 fcport->flags |= FCF_LOGIN_NEEDED;
3007 if (fcport->loop_id != FC_NO_LOOP_ID && 3020 if (fcport->loop_id != FC_NO_LOOP_ID &&
3008 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 3021 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3009 fcport->port_type != FCT_INITIATOR && 3022 fcport->port_type != FCT_INITIATOR &&
3010 fcport->port_type != FCT_BROADCAST) { 3023 fcport->port_type != FCT_BROADCAST) {
3011 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3024 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3259,9 +3272,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3259 3272
3260 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3273 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3261 if (rval == QLA_SUCCESS) { 3274 if (rval == QLA_SUCCESS) {
3262 /* Send an ADISC to tape devices.*/ 3275 /* Send an ADISC to FCP2 devices.*/
3263 opts = 0; 3276 opts = 0;
3264 if (fcport->flags & FCF_TAPE_PRESENT) 3277 if (fcport->flags & FCF_FCP2_DEVICE)
3265 opts |= BIT_1; 3278 opts |= BIT_1;
3266 rval = qla2x00_get_port_database(vha, fcport, opts); 3279 rval = qla2x00_get_port_database(vha, fcport, opts);
3267 if (rval != QLA_SUCCESS) { 3280 if (rval != QLA_SUCCESS) {
@@ -3560,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3560 /* Requeue all commands in outstanding command list. */ 3573 /* Requeue all commands in outstanding command list. */
3561 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3574 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3562 3575
3576 if (unlikely(pci_channel_offline(ha->pdev) &&
3577 ha->flags.pci_channel_io_perm_failure)) {
3578 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3579 status = 0;
3580 return status;
3581 }
3582
3563 ha->isp_ops->get_flash_version(vha, req->ring); 3583 ha->isp_ops->get_flash_version(vha, req->ring);
3564 3584
3565 ha->isp_ops->nvram_config(vha); 3585 ha->isp_ops->nvram_config(vha);
@@ -4458,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4458 int ret, retries; 4478 int ret, retries;
4459 struct qla_hw_data *ha = vha->hw; 4479 struct qla_hw_data *ha = vha->hw;
4460 4480
4481 if (ha->flags.pci_channel_io_perm_failure)
4482 return;
4461 if (!IS_FWI2_CAPABLE(ha)) 4483 if (!IS_FWI2_CAPABLE(ha))
4462 return; 4484 return;
4463 if (!ha->fw_major_version) 4485 if (!ha->fw_major_version)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1692a883f4de..6fc63b98818c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id)
152 for (iter = 50; iter--; ) { 152 for (iter = 50; iter--; ) {
153 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 153 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
154 if (stat & HSR_RISC_PAUSED) { 154 if (stat & HSR_RISC_PAUSED) {
155 if (pci_channel_offline(ha->pdev)) 155 if (unlikely(pci_channel_offline(ha->pdev)))
156 break; 156 break;
157 157
158 hccr = RD_REG_WORD(&reg->hccr); 158 hccr = RD_REG_WORD(&reg->hccr);
@@ -1846,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
1846 reg = &ha->iobase->isp24; 1846 reg = &ha->iobase->isp24;
1847 status = 0; 1847 status = 0;
1848 1848
1849 if (unlikely(pci_channel_offline(ha->pdev)))
1850 return IRQ_HANDLED;
1851
1849 spin_lock_irqsave(&ha->hardware_lock, flags); 1852 spin_lock_irqsave(&ha->hardware_lock, flags);
1850 vha = pci_get_drvdata(ha->pdev); 1853 vha = pci_get_drvdata(ha->pdev);
1851 for (iter = 50; iter--; ) { 1854 for (iter = 50; iter--; ) {
1852 stat = RD_REG_DWORD(&reg->host_status); 1855 stat = RD_REG_DWORD(&reg->host_status);
1853 if (stat & HSRX_RISC_PAUSED) { 1856 if (stat & HSRX_RISC_PAUSED) {
1854 if (pci_channel_offline(ha->pdev)) 1857 if (unlikely(pci_channel_offline(ha->pdev)))
1855 break; 1858 break;
1856 1859
1857 hccr = RD_REG_DWORD(&reg->hccr); 1860 hccr = RD_REG_DWORD(&reg->hccr);
@@ -1914,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1914 struct rsp_que *rsp; 1917 struct rsp_que *rsp;
1915 struct device_reg_24xx __iomem *reg; 1918 struct device_reg_24xx __iomem *reg;
1916 struct scsi_qla_host *vha; 1919 struct scsi_qla_host *vha;
1920 unsigned long flags;
1917 1921
1918 rsp = (struct rsp_que *) dev_id; 1922 rsp = (struct rsp_que *) dev_id;
1919 if (!rsp) { 1923 if (!rsp) {
@@ -1924,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1924 ha = rsp->hw; 1928 ha = rsp->hw;
1925 reg = &ha->iobase->isp24; 1929 reg = &ha->iobase->isp24;
1926 1930
1927 spin_lock_irq(&ha->hardware_lock); 1931 spin_lock_irqsave(&ha->hardware_lock, flags);
1928 1932
1929 vha = qla25xx_get_host(rsp); 1933 vha = pci_get_drvdata(ha->pdev);
1930 qla24xx_process_response_queue(vha, rsp); 1934 qla24xx_process_response_queue(vha, rsp);
1931 if (!ha->flags.disable_msix_handshake) { 1935 if (!ha->flags.disable_msix_handshake) {
1932 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1936 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1933 RD_REG_DWORD_RELAXED(&reg->hccr); 1937 RD_REG_DWORD_RELAXED(&reg->hccr);
1934 } 1938 }
1935 spin_unlock_irq(&ha->hardware_lock); 1939 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1936 1940
1937 return IRQ_HANDLED; 1941 return IRQ_HANDLED;
1938} 1942}
@@ -1943,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1943 struct qla_hw_data *ha; 1947 struct qla_hw_data *ha;
1944 struct rsp_que *rsp; 1948 struct rsp_que *rsp;
1945 struct device_reg_24xx __iomem *reg; 1949 struct device_reg_24xx __iomem *reg;
1950 unsigned long flags;
1946 1951
1947 rsp = (struct rsp_que *) dev_id; 1952 rsp = (struct rsp_que *) dev_id;
1948 if (!rsp) { 1953 if (!rsp) {
@@ -1955,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1955 /* Clear the interrupt, if enabled, for this response queue */ 1960 /* Clear the interrupt, if enabled, for this response queue */
1956 if (rsp->options & ~BIT_6) { 1961 if (rsp->options & ~BIT_6) {
1957 reg = &ha->iobase->isp24; 1962 reg = &ha->iobase->isp24;
1958 spin_lock_irq(&ha->hardware_lock); 1963 spin_lock_irqsave(&ha->hardware_lock, flags);
1959 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1964 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1960 RD_REG_DWORD_RELAXED(&reg->hccr); 1965 RD_REG_DWORD_RELAXED(&reg->hccr);
1961 spin_unlock_irq(&ha->hardware_lock); 1966 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1962 } 1967 }
1963 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 1968 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1964 1969
@@ -1976,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1976 uint32_t stat; 1981 uint32_t stat;
1977 uint32_t hccr; 1982 uint32_t hccr;
1978 uint16_t mb[4]; 1983 uint16_t mb[4];
1984 unsigned long flags;
1979 1985
1980 rsp = (struct rsp_que *) dev_id; 1986 rsp = (struct rsp_que *) dev_id;
1981 if (!rsp) { 1987 if (!rsp) {
@@ -1987,12 +1993,12 @@ qla24xx_msix_default(int irq, void *dev_id)
1987 reg = &ha->iobase->isp24; 1993 reg = &ha->iobase->isp24;
1988 status = 0; 1994 status = 0;
1989 1995
1990 spin_lock_irq(&ha->hardware_lock); 1996 spin_lock_irqsave(&ha->hardware_lock, flags);
1991 vha = pci_get_drvdata(ha->pdev); 1997 vha = pci_get_drvdata(ha->pdev);
1992 do { 1998 do {
1993 stat = RD_REG_DWORD(&reg->host_status); 1999 stat = RD_REG_DWORD(&reg->host_status);
1994 if (stat & HSRX_RISC_PAUSED) { 2000 if (stat & HSRX_RISC_PAUSED) {
1995 if (pci_channel_offline(ha->pdev)) 2001 if (unlikely(pci_channel_offline(ha->pdev)))
1996 break; 2002 break;
1997 2003
1998 hccr = RD_REG_DWORD(&reg->hccr); 2004 hccr = RD_REG_DWORD(&reg->hccr);
@@ -2036,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id)
2036 } 2042 }
2037 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2043 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2038 } while (0); 2044 } while (0);
2039 spin_unlock_irq(&ha->hardware_lock); 2045 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2040 2046
2041 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2047 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2042 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2048 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -2274,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2274 msix->rsp = rsp; 2280 msix->rsp = rsp;
2275 return ret; 2281 return ret;
2276} 2282}
2277
2278struct scsi_qla_host *
2279qla25xx_get_host(struct rsp_que *rsp)
2280{
2281 srb_t *sp;
2282 struct qla_hw_data *ha = rsp->hw;
2283 struct scsi_qla_host *vha = NULL;
2284 struct sts_entry_24xx *pkt;
2285 struct req_que *req;
2286 uint16_t que;
2287 uint32_t handle;
2288
2289 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2290 que = MSW(pkt->handle);
2291 handle = (uint32_t) LSW(pkt->handle);
2292 req = ha->req_q_map[que];
2293 if (handle < MAX_OUTSTANDING_COMMANDS) {
2294 sp = req->outstanding_cmds[handle];
2295 if (sp)
2296 return sp->fcport->vha;
2297 else
2298 goto base_que;
2299 }
2300base_que:
2301 vha = pci_get_drvdata(ha->pdev);
2302 return vha;
2303}
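
The qla_isr.c hunks replace spin_lock_irq()/spin_unlock_irq() with the irqsave variants: an MSI-X handler cannot assume local interrupts were enabled on entry, so it must restore the previous interrupt state rather than unconditionally re-enable it. Illustrative sketch of the handler body:

#include <linux/spinlock.h>

static void demo_handler_body(spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags); /* preserves the caller's IRQ state */
        /* ... process the response queue / touch hardware registers ... */
        spin_unlock_irqrestore(lock, flags);
}
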
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 05d595d9a7ef..056e4d4505f3 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
56 56
57 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); 57 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
58 58
59 if (ha->flags.pci_channel_io_perm_failure) {
60 DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
61 "Exiting.\n", __func__, vha->host_no));
62 return QLA_FUNCTION_TIMEOUT;
63 }
64
59 /* 65 /*
60 * Wait for active mailbox commands to finish by waiting at most tov 66 * Wait for active mailbox commands to finish by waiting at most tov
61 * seconds. This is to serialize actual issuing of mailbox cmds during 67 * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
154 /* Check for pending interrupts. */ 160 /* Check for pending interrupts. */
155 qla2x00_poll(ha->rsp_q_map[0]); 161 qla2x00_poll(ha->rsp_q_map[0]);
156 162
157 if (command != MBC_LOAD_RISC_RAM_EXTENDED && 163 if (!ha->flags.mbox_int &&
158 !ha->flags.mbox_int) 164 !(IS_QLA2200(ha) &&
165 command == MBC_LOAD_RISC_RAM_EXTENDED))
159 msleep(10); 166 msleep(10);
160 } /* while */ 167 } /* while */
168 DEBUG17(qla_printk(KERN_WARNING, ha,
169 "Waited %d sec\n",
170 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
161 } 171 }
162 172
163 /* Check whether we timed out */ 173 /* Check whether we timed out */
@@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
227 237
228 if (rval == QLA_FUNCTION_TIMEOUT && 238 if (rval == QLA_FUNCTION_TIMEOUT &&
229 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { 239 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
230 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { 240 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
241 ha->flags.eeh_busy) {
231 /* not in dpc. schedule it for dpc to take over. */ 242 /* not in dpc. schedule it for dpc to take over. */
232 DEBUG(printk("%s(%ld): timeout schedule " 243 DEBUG(printk("%s(%ld): timeout schedule "
233 "isp_abort_needed.\n", __func__, 244 "isp_abort_needed.\n", __func__,
@@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
237 base_vha->host_no)); 248 base_vha->host_no));
238 qla_printk(KERN_WARNING, ha, 249 qla_printk(KERN_WARNING, ha,
239 "Mailbox command timeout occurred. Scheduling ISP " 250 "Mailbox command timeout occurred. Scheduling ISP "
240 "abort.\n"); 251 "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy);
241 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 252 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
242 qla2xxx_wake_dpc(vha); 253 qla2xxx_wake_dpc(vha);
243 } else if (!abort_active) { 254 } else if (!abort_active) {
@@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2530 if (!IS_FWI2_CAPABLE(vha->hw)) 2541 if (!IS_FWI2_CAPABLE(vha->hw))
2531 return QLA_FUNCTION_FAILED; 2542 return QLA_FUNCTION_FAILED;
2532 2543
2544 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2545 return QLA_FUNCTION_FAILED;
2546
2533 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2547 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2534 2548
2535 mcp->mb[0] = MBC_TRACE_CONTROL; 2549 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2565 if (!IS_FWI2_CAPABLE(vha->hw)) 2579 if (!IS_FWI2_CAPABLE(vha->hw))
2566 return QLA_FUNCTION_FAILED; 2580 return QLA_FUNCTION_FAILED;
2567 2581
2582 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2583 return QLA_FUNCTION_FAILED;
2584
2568 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2585 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2569 2586
2570 mcp->mb[0] = MBC_TRACE_CONTROL; 2587 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2595 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2612 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2596 return QLA_FUNCTION_FAILED; 2613 return QLA_FUNCTION_FAILED;
2597 2614
2615 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2616 return QLA_FUNCTION_FAILED;
2617
2598 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2618 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2599 2619
2600 mcp->mb[0] = MBC_TRACE_CONTROL; 2620 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2639 if (!IS_FWI2_CAPABLE(vha->hw)) 2659 if (!IS_FWI2_CAPABLE(vha->hw))
2640 return QLA_FUNCTION_FAILED; 2660 return QLA_FUNCTION_FAILED;
2641 2661
2662 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2663 return QLA_FUNCTION_FAILED;
2664
2642 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2665 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2643 2666
2644 mcp->mb[0] = MBC_TRACE_CONTROL; 2667 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3643 3666
3644 return rval; 3667 return rval;
3645} 3668}
3669
3670int
3671qla2x00_get_data_rate(scsi_qla_host_t *vha)
3672{
3673 int rval;
3674 mbx_cmd_t mc;
3675 mbx_cmd_t *mcp = &mc;
3676 struct qla_hw_data *ha = vha->hw;
3677
3678 if (!IS_FWI2_CAPABLE(ha))
3679 return QLA_FUNCTION_FAILED;
3680
3681 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
3682
3683 mcp->mb[0] = MBC_DATA_RATE;
3684 mcp->mb[1] = 0;
3685 mcp->out_mb = MBX_1|MBX_0;
3686 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3687 mcp->tov = MBX_TOV_SECONDS;
3688 mcp->flags = 0;
3689 rval = qla2x00_mailbox_command(vha, mcp);
3690 if (rval != QLA_SUCCESS) {
3691 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
3692 __func__, vha->host_no, rval, mcp->mb[0]));
3693 } else {
3694 DEBUG11(printk(KERN_INFO
3695 "%s(%ld): done.\n", __func__, vha->host_no));
3696 if (mcp->mb[1] != 0x7)
3697 ha->link_data_rate = mcp->mb[1];
3698 }
3699
3700 return rval;
3701}
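
Several qla_mbx.c hunks add the same early-exit guard: when the PCI channel is offline, or a prior EEH event marked the function as permanently failed, mailbox and trace-control paths return immediately instead of waiting out a timeout and scheduling an ISP abort. A hedged sketch with generic error codes standing in for the driver's QLA_* return values:

#include <linux/errno.h>
#include <linux/pci.h>

static int demo_issue_mbx(struct pci_dev *pdev, bool perm_failure)
{
        if (perm_failure)
                return -ETIMEDOUT;              /* cf. QLA_FUNCTION_TIMEOUT */
        if (unlikely(pci_channel_offline(pdev)))
                return -EIO;                    /* cf. QLA_FUNCTION_FAILED */
        /* ... build and post the mailbox command ... */
        return 0;
}
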
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 2a4c7f4e7b69..ff17dee28613 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -636,11 +636,15 @@ failed:
636 636
637static void qla_do_work(struct work_struct *work) 637static void qla_do_work(struct work_struct *work)
638{ 638{
639 unsigned long flags;
639 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); 640 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
640 struct scsi_qla_host *vha; 641 struct scsi_qla_host *vha;
642 struct qla_hw_data *ha = rsp->hw;
641 643
642 vha = qla25xx_get_host(rsp); 644 spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
645 vha = pci_get_drvdata(ha->pdev);
643 qla24xx_process_response_queue(vha, rsp); 646 qla24xx_process_response_queue(vha, rsp);
647 spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
644} 648}
645 649
646/* create response queue */ 650/* create response queue */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2f873d237325..8529eb1f3cd4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -475,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
475 srb_t *sp; 475 srb_t *sp;
476 int rval; 476 int rval;
477 477
478 if (unlikely(pci_channel_offline(ha->pdev))) { 478 if (ha->flags.eeh_busy) {
479 if (ha->pdev->error_state == pci_channel_io_frozen) 479 if (ha->flags.pci_channel_io_perm_failure)
480 cmd->result = DID_REQUEUE << 16;
481 else
482 cmd->result = DID_NO_CONNECT << 16; 480 cmd->result = DID_NO_CONNECT << 16;
481 else
482 cmd->result = DID_REQUEUE << 16;
483 goto qc24_fail_command; 483 goto qc24_fail_command;
484 } 484 }
485 485
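
Note on the queuecommand hunk above: while EEH recovery is in progress, commands are bounced back for a retry (DID_REQUEUE), but once the slot is declared permanently failed they complete with DID_NO_CONNECT. A minimal sketch of that decision, with the two flags reduced to booleans for illustration:

#include <linux/types.h>
#include <scsi/scsi.h>

static int demo_eeh_result(bool eeh_busy, bool perm_failure, int *result)
{
        if (!eeh_busy)
                return 0;               /* proceed with normal queueing */

        *result = perm_failure ? (DID_NO_CONNECT << 16)
                               : (DID_REQUEUE << 16);
        return 1;                       /* caller fails the command with *result */
}
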
@@ -552,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
552#define ABORT_POLLING_PERIOD 1000 552#define ABORT_POLLING_PERIOD 1000
553#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 553#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
554 unsigned long wait_iter = ABORT_WAIT_ITER; 554 unsigned long wait_iter = ABORT_WAIT_ITER;
555 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
556 struct qla_hw_data *ha = vha->hw;
555 int ret = QLA_SUCCESS; 557 int ret = QLA_SUCCESS;
556 558
559 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
560 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
561 return ret;
562 }
563
557 while (CMD_SP(cmd) && wait_iter--) { 564 while (CMD_SP(cmd) && wait_iter--) {
558 msleep(ABORT_POLLING_PERIOD); 565 msleep(ABORT_POLLING_PERIOD);
559 } 566 }
@@ -1181,7 +1188,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1181 scsi_qla_host_t *vha = shost_priv(sdev->host); 1188 scsi_qla_host_t *vha = shost_priv(sdev->host);
1182 struct qla_hw_data *ha = vha->hw; 1189 struct qla_hw_data *ha = vha->hw;
1183 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1190 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1184 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1185 struct req_que *req = vha->req; 1191 struct req_que *req = vha->req;
1186 1192
1187 if (sdev->tagged_supported) 1193 if (sdev->tagged_supported)
@@ -1190,8 +1196,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1190 scsi_deactivate_tcq(sdev, req->max_q_depth); 1196 scsi_deactivate_tcq(sdev, req->max_q_depth);
1191 1197
1192 rport->dev_loss_tmo = ha->port_down_retry_count; 1198 rport->dev_loss_tmo = ha->port_down_retry_count;
1193 if (sdev->type == TYPE_TAPE)
1194 fcport->flags |= FCF_TAPE_PRESENT;
1195 1199
1196 return 0; 1200 return 0;
1197} 1201}
@@ -1810,6 +1814,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1810 1814
1811 /* Set ISP-type information. */ 1815 /* Set ISP-type information. */
1812 qla2x00_set_isp_flags(ha); 1816 qla2x00_set_isp_flags(ha);
1817
1818 /* Set EEH reset type to fundamental if required by hba */
1819 if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1820 pdev->needs_freset = 1;
1821 pci_save_state(pdev);
1822 }
1823
1813 /* Configure PCI I/O space */ 1824 /* Configure PCI I/O space */
1814 ret = qla2x00_iospace_config(ha); 1825 ret = qla2x00_iospace_config(ha);
1815 if (ret) 1826 if (ret)
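Setting pdev->needs_freset asks the EEH core for a fundamental (hot) reset on these ISPs, and pci_save_state() captures config space so it can be restored afterwards. A generic sketch of the restoring side in a ->slot_reset() handler, shown only as the usual pairing rather than anything taken from this patch:

    /* Generic ->slot_reset() pairing (sketch). */
    static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
    {
        pci_restore_state(pdev);        /* state saved at probe time */
        if (pci_enable_device(pdev))
            return PCI_ERS_RESULT_DISCONNECT;
        pci_set_master(pdev);
        return PCI_ERS_RESULT_RECOVERED;
    }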
@@ -2174,6 +2185,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2174{ 2185{
2175 struct qla_hw_data *ha = vha->hw; 2186 struct qla_hw_data *ha = vha->hw;
2176 2187
2188 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2189
2190 /* Disable timer */
2191 if (vha->timer_active)
2192 qla2x00_stop_timer(vha);
2193
2194 /* Kill the kernel thread for this host */
2195 if (ha->dpc_thread) {
2196 struct task_struct *t = ha->dpc_thread;
2197
2198 /*
2199 * qla2xxx_wake_dpc checks for ->dpc_thread
2200 * so we need to zero it out.
2201 */
2202 ha->dpc_thread = NULL;
2203 kthread_stop(t);
2204 }
2205
2177 qla25xx_delete_queues(vha); 2206 qla25xx_delete_queues(vha);
2178 2207
2179 if (ha->flags.fce_enabled) 2208 if (ha->flags.fce_enabled)
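Note the ordering in the teardown above: the thread pointer is cleared before kthread_stop() so qla2xxx_wake_dpc() can no longer wake a thread that is going away. For reference, a sketch of the loop shape such a kthread runs (simplified; the real qla2x00_do_dpc() handles many more work flags):

    static int example_dpc(void *data)
    {
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
            schedule();                       /* woken by qla2xxx_wake_dpc() */
            __set_current_state(TASK_RUNNING);
            /* ... process dpc_flags work items ... */
            set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;                             /* kthread_stop() returns this */
    }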
@@ -2185,6 +2214,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2185 /* Stop currently executing firmware. */ 2214 /* Stop currently executing firmware. */
2186 qla2x00_try_to_stop_firmware(vha); 2215 qla2x00_try_to_stop_firmware(vha);
2187 2216
2217 vha->flags.online = 0;
2218
2188 /* turn-off interrupts on the card */ 2219 /* turn-off interrupts on the card */
2189 if (ha->interrupts_on) 2220 if (ha->interrupts_on)
2190 ha->isp_ops->disable_intrs(ha); 2221 ha->isp_ops->disable_intrs(ha);
@@ -2771,7 +2802,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
2771 2802
2772 fcport->login_retry--; 2803 fcport->login_retry--;
2773 if (fcport->flags & FCF_FABRIC_DEVICE) { 2804 if (fcport->flags & FCF_FABRIC_DEVICE) {
2774 if (fcport->flags & FCF_TAPE_PRESENT) 2805 if (fcport->flags & FCF_FCP2_DEVICE)
2775 ha->isp_ops->fabric_logout(vha, 2806 ha->isp_ops->fabric_logout(vha,
2776 fcport->loop_id, 2807 fcport->loop_id,
2777 fcport->d_id.b.domain, 2808 fcport->d_id.b.domain,
@@ -2859,6 +2890,13 @@ qla2x00_do_dpc(void *data)
2859 if (!base_vha->flags.init_done) 2890 if (!base_vha->flags.init_done)
2860 continue; 2891 continue;
2861 2892
2893 if (ha->flags.eeh_busy) {
2894 DEBUG17(qla_printk(KERN_WARNING, ha,
2895 "qla2x00_do_dpc: dpc_flags: %lx\n",
2896 base_vha->dpc_flags));
2897 continue;
2898 }
2899
2862 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no)); 2900 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2863 2901
2864 ha->dpc_active = 1; 2902 ha->dpc_active = 1;
@@ -3049,8 +3087,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
3049 int index; 3087 int index;
3050 srb_t *sp; 3088 srb_t *sp;
3051 int t; 3089 int t;
3090 uint16_t w;
3052 struct qla_hw_data *ha = vha->hw; 3091 struct qla_hw_data *ha = vha->hw;
3053 struct req_que *req; 3092 struct req_que *req;
3093
3094 /* Hardware read to raise pending EEH errors during mailbox waits. */
3095 if (!pci_channel_offline(ha->pdev))
3096 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3054 /* 3097 /*
3055 * Ports - Port down timer. 3098 * Ports - Port down timer.
3056 * 3099 *
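The config-space read added above acts as an EEH heartbeat: on a frozen slot the read surfaces the error so ->error_detected() runs even when no other MMIO is outstanding, and pci_channel_offline() avoids poking a slot already marked dead. A generic sketch of the same trick in any periodic timer:

    /* EEH heartbeat from a periodic timer (sketch). */
    u16 vendor;    /* value is discarded; the read itself is the point */
    if (!pci_channel_offline(pdev))
        pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);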
@@ -3095,7 +3138,10 @@ qla2x00_timer(scsi_qla_host_t *vha)
3095 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3138 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3096 atomic_set(&vha->loop_state, LOOP_DEAD); 3139 atomic_set(&vha->loop_state, LOOP_DEAD);
3097 3140
3098 /* Schedule an ISP abort to return any tape commands. */ 3141 /*
3142 * Schedule an ISP abort to return any FCP2-device
3143 * commands.
3144 */
3099 /* NPIV - scan physical port only */ 3145 /* NPIV - scan physical port only */
3100 if (!vha->vp_idx) { 3146 if (!vha->vp_idx) {
3101 spin_lock_irqsave(&ha->hardware_lock, 3147 spin_lock_irqsave(&ha->hardware_lock,
@@ -3112,7 +3158,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3112 if (sp->ctx) 3158 if (sp->ctx)
3113 continue; 3159 continue;
3114 sfcp = sp->fcport; 3160 sfcp = sp->fcport;
3115 if (!(sfcp->flags & FCF_TAPE_PRESENT)) 3161 if (!(sfcp->flags & FCF_FCP2_DEVICE))
3116 continue; 3162 continue;
3117 3163
3118 set_bit(ISP_ABORT_NEEDED, 3164 set_bit(ISP_ABORT_NEEDED,
@@ -3252,16 +3298,23 @@ qla2x00_release_firmware(void)
3252static pci_ers_result_t 3298static pci_ers_result_t
3253qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 3299qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3254{ 3300{
3255 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 3301 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3302 struct qla_hw_data *ha = vha->hw;
3303
3304 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3305 state));
3256 3306
3257 switch (state) { 3307 switch (state) {
3258 case pci_channel_io_normal: 3308 case pci_channel_io_normal:
3309 ha->flags.eeh_busy = 0;
3259 return PCI_ERS_RESULT_CAN_RECOVER; 3310 return PCI_ERS_RESULT_CAN_RECOVER;
3260 case pci_channel_io_frozen: 3311 case pci_channel_io_frozen:
3312 ha->flags.eeh_busy = 1;
3261 pci_disable_device(pdev); 3313 pci_disable_device(pdev);
3262 return PCI_ERS_RESULT_NEED_RESET; 3314 return PCI_ERS_RESULT_NEED_RESET;
3263 case pci_channel_io_perm_failure: 3315 case pci_channel_io_perm_failure:
3264 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 3316 ha->flags.pci_channel_io_perm_failure = 1;
3317 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3265 return PCI_ERS_RESULT_DISCONNECT; 3318 return PCI_ERS_RESULT_DISCONNECT;
3266 } 3319 }
3267 return PCI_ERS_RESULT_NEED_RESET; 3320 return PCI_ERS_RESULT_NEED_RESET;
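The three pci_channel_state_t cases map directly onto the AER/EEH recovery steps: io_normal lets recovery proceed without a reset, io_frozen requests a slot reset, and io_perm_failure tears the device down. These callbacks are plumbed through struct pci_error_handlers; a sketch of the wiring is below (the mmio_enabled entry is an assumption, the others appear in this patch):

    static struct pci_error_handlers qla2xxx_err_handler = {
        .error_detected = qla2xxx_pci_error_detected,
        .mmio_enabled   = qla2xxx_pci_mmio_enabled,   /* assumed */
        .slot_reset     = qla2xxx_pci_slot_reset,
        .resume         = qla2xxx_pci_resume,
    };
    /* referenced from the driver's struct pci_driver as .err_handler */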
@@ -3312,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3312 struct qla_hw_data *ha = base_vha->hw; 3365 struct qla_hw_data *ha = base_vha->hw;
3313 int rc; 3366 int rc;
3314 3367
3368 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3369
3315 if (ha->mem_only) 3370 if (ha->mem_only)
3316 rc = pci_enable_device_mem(pdev); 3371 rc = pci_enable_device_mem(pdev);
3317 else 3372 else
@@ -3320,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3320 if (rc) { 3375 if (rc) {
3321 qla_printk(KERN_WARNING, ha, 3376 qla_printk(KERN_WARNING, ha,
3322 "Can't re-enable PCI device after reset.\n"); 3377 "Can't re-enable PCI device after reset.\n");
3323
3324 return ret; 3378 return ret;
3325 } 3379 }
3326 pci_set_master(pdev);
3327 3380
3328 if (ha->isp_ops->pci_config(base_vha)) 3381 if (ha->isp_ops->pci_config(base_vha))
3329 return ret; 3382 return ret;
3330 3383
3384#ifdef QL_DEBUG_LEVEL_17
3385 {
3386 uint8_t b;
3387 uint32_t i;
3388
3389 printk("slot_reset_1: ");
3390 for (i = 0; i < 256; i++) {
3391 pci_read_config_byte(ha->pdev, i, &b);
3392 printk("%s%02x", (i%16) ? " " : "\n", b);
3393 }
3394 printk("\n");
3395 }
3396#endif
3331 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3397 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3332 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) 3398 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
3333 ret = PCI_ERS_RESULT_RECOVERED; 3399 ret = PCI_ERS_RESULT_RECOVERED;
3334 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3400 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3335 3401
3402 DEBUG17(qla_printk(KERN_WARNING, ha,
3403 "slot_reset-return:ret=%x\n", ret));
3404
3336 return ret; 3405 return ret;
3337} 3406}
3338 3407
@@ -3343,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
3343 struct qla_hw_data *ha = base_vha->hw; 3412 struct qla_hw_data *ha = base_vha->hw;
3344 int ret; 3413 int ret;
3345 3414
3415 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
3416
3346 ret = qla2x00_wait_for_hba_online(base_vha); 3417 ret = qla2x00_wait_for_hba_online(base_vha);
3347 if (ret != QLA_SUCCESS) { 3418 if (ret != QLA_SUCCESS) {
3348 qla_printk(KERN_ERR, ha, 3419 qla_printk(KERN_ERR, ha,
3349 "the device failed to resume I/O " 3420 "the device failed to resume I/O "
3350 "from slot/link_reset"); 3421 "from slot/link_reset");
3351 } 3422 }
3423
3424 ha->flags.eeh_busy = 0;
3425
3352 pci_cleanup_aer_uncorrect_error_status(pdev); 3426 pci_cleanup_aer_uncorrect_error_status(pdev);
3353} 3427}
3354 3428
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 010e69b29afe..371dc895972a 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2292,11 +2292,14 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2292 uint32_t faddr, left, burst; 2292 uint32_t faddr, left, burst;
2293 struct qla_hw_data *ha = vha->hw; 2293 struct qla_hw_data *ha = vha->hw;
2294 2294
2295 if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
2296 goto try_fast;
2295 if (offset & 0xfff) 2297 if (offset & 0xfff)
2296 goto slow_read; 2298 goto slow_read;
2297 if (length < OPTROM_BURST_SIZE) 2299 if (length < OPTROM_BURST_SIZE)
2298 goto slow_read; 2300 goto slow_read;
2299 2301
2302try_fast:
2300 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2303 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
2301 &optrom_dma, GFP_KERNEL); 2304 &optrom_dma, GFP_KERNEL);
2302 if (!optrom) { 2305 if (!optrom) {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c482220f7eed..ed36279a33c1 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k8" 10#define QLA2XXX_VERSION "8.03.01-k10"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d8927681ec88..c6642423cc67 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
749 */ 749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid; 750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751 751
752 scsi_release_buffers(cmd);
752 blk_end_request_all(req, 0); 753 blk_end_request_all(req, 0);
753 754
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd); 755 scsi_next_command(cmd);
756 return; 756 return;
757 } 757 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index ddfcecd5099f..653f22a8deb9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3527,7 +3527,10 @@ fc_bsg_job_timeout(struct request *req)
3527 if (!done && i->f->bsg_timeout) { 3527 if (!done && i->f->bsg_timeout) {
3528 /* call LLDD to abort the i/o as it has timed out */ 3528 /* call LLDD to abort the i/o as it has timed out */
3529 err = i->f->bsg_timeout(job); 3529 err = i->f->bsg_timeout(job);
3530 if (err) 3530 if (err == -EAGAIN) {
3531 job->ref_cnt--;
3532 return BLK_EH_RESET_TIMER;
3533 } else if (err)
3531 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " 3534 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3532 "abort failed with status %d\n", err); 3535 "abort failed with status %d\n", err);
3533 } 3536 }
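With this change an LLD's ->bsg_timeout() can return -EAGAIN to say the abort is still in flight; fc_bsg_job_timeout() then drops the reference it took and rearms the block-layer timer instead of failing the job. A hypothetical LLD-side handler (names assumed):

    /* Hypothetical ->bsg_timeout() (sketch). */
    static int example_bsg_timeout(struct fc_bsg_job *job)
    {
        if (example_abort_pending(job))   /* assumed helper */
            return -EAGAIN;               /* timer is reset, job lives on */
        return example_abort_bsg(job);    /* 0 on success, errno otherwise */
    }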
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3058bb1aff95..fd7b15be7640 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
623 } 623 }
624 break; 624 break;
625 case INQUIRY: 625 case INQUIRY:
626 if (lun >= host->max_lun) {
627 cmd->result = DID_NO_CONNECT << 16;
628 done(cmd);
629 return 0;
630 }
626 if (id != host->max_id - 1) 631 if (id != host->max_id - 1)
627 break; 632 break;
628 if (!lun && !cmd->device->channel && 633 if (!lun && !cmd->device->channel &&
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index 1e3d19397a59..8681f1345056 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -58,7 +58,7 @@ static const char serial21285_name[] = "Footbridge UART";
58static void serial21285_stop_tx(struct uart_port *port) 58static void serial21285_stop_tx(struct uart_port *port)
59{ 59{
60 if (tx_enabled(port)) { 60 if (tx_enabled(port)) {
61 disable_irq(IRQ_CONTX); 61 disable_irq_nosync(IRQ_CONTX);
62 tx_enabled(port) = 0; 62 tx_enabled(port) = 0;
63 } 63 }
64} 64}
@@ -74,7 +74,7 @@ static void serial21285_start_tx(struct uart_port *port)
74static void serial21285_stop_rx(struct uart_port *port) 74static void serial21285_stop_rx(struct uart_port *port)
75{ 75{
76 if (rx_enabled(port)) { 76 if (rx_enabled(port)) {
77 disable_irq(IRQ_CONRX); 77 disable_irq_nosync(IRQ_CONRX);
78 rx_enabled(port) = 0; 78 rx_enabled(port) = 0;
79 } 79 }
80} 80}
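These stop paths can run with the port lock held and from the transmit/receive interrupt path itself, where the synchronous disable_irq() would wait for the currently running handler and can deadlock against itself; disable_irq_nosync() only masks the line and returns. A one-line sketch of the distinction:

    /* disable_irq(IRQ_CONTX);       waits for a running handler - can deadlock here */
    disable_irq_nosync(IRQ_CONTX);   /* masks the line and returns immediately */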
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c3e37c8e7e26..e9b15c3746fa 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
83 83
84#define PASS_LIMIT 256 84#define PASS_LIMIT 256
85 85
86#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
87
88
86/* 89/*
87 * We default to IRQ0 for the "no irq" hack. Some 90 * We default to IRQ0 for the "no irq" hack. Some
88 * machine types want others as well - they're free 91 * machine types want others as well - they're free
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
1792 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1795 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
1793 spin_unlock_irqrestore(&up->port.lock, flags); 1796 spin_unlock_irqrestore(&up->port.lock, flags);
1794 1797
1795 return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; 1798 return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
1796} 1799}
1797 1800
1798static unsigned int serial8250_get_mctrl(struct uart_port *port) 1801static unsigned int serial8250_get_mctrl(struct uart_port *port)
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
1850 spin_unlock_irqrestore(&up->port.lock, flags); 1853 spin_unlock_irqrestore(&up->port.lock, flags);
1851} 1854}
1852 1855
1853#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
1854
1855/* 1856/*
1856 * Wait for transmitter & holding register to empty 1857 * Wait for transmitter & holding register to empty
1857 */ 1858 */
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 36ede02ceacf..24485cc62ff8 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -328,15 +328,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
328 /* U.S. Robotics 56K Voice INT PnP*/ 328 /* U.S. Robotics 56K Voice INT PnP*/
329 { "USR9190", 0 }, 329 { "USR9190", 0 },
330 /* Wacom tablets */ 330 /* Wacom tablets */
331 { "WACF004", 0 }, 331 { "WACFXXX", 0 },
332 { "WACF005", 0 },
333 { "WACF006", 0 },
334 { "WACF007", 0 },
335 { "WACF008", 0 },
336 { "WACF009", 0 },
337 { "WACF00A", 0 },
338 { "WACF00B", 0 },
339 { "WACF00C", 0 },
340 /* Compaq touchscreen */ 332 /* Compaq touchscreen */
341 { "FPI2002", 0 }, 333 { "FPI2002", 0 },
342 /* Fujitsu Stylistic touchscreens */ 334 /* Fujitsu Stylistic touchscreens */
@@ -354,6 +346,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
354 { "FUJ02E5", 0 }, 346 { "FUJ02E5", 0 },
355 /* Fujitsu P-series tablet PC device */ 347 /* Fujitsu P-series tablet PC device */
356 { "FUJ02E6", 0 }, 348 { "FUJ02E6", 0 },
349 /* Fujitsu Wacom 2FGT Tablet PC device */
350 { "FUJ02E7", 0 },
357 /* 351 /*
358 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in 352 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
359 * disguise) 353 * disguise)
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 9ff47db0b2ce..ebdd2b984d16 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -459,7 +459,7 @@ config SERIAL_SAMSUNG_UARTS
459 int 459 int
460 depends on ARM && PLAT_S3C 460 depends on ARM && PLAT_S3C
461 default 2 if ARCH_S3C2400 461 default 2 if ARCH_S3C2400
462 default 4 if ARCH_S5PC1XX || ARCH_S3C64XX || CPU_S3C2443 462 default 4 if ARCH_S5P6440 || ARCH_S5PC1XX || ARCH_S5PV210 || ARCH_S3C64XX || CPU_S3C2443
463 default 3 463 default 3
464 help 464 help
465 Select the number of available UART ports for the Samsung S3C 465 Select the number of available UART ports for the Samsung S3C
@@ -526,11 +526,11 @@ config SERIAL_S3C24A0
526 Serial port support for the Samsung S3C24A0 SoC 526 Serial port support for the Samsung S3C24A0 SoC
527 527
528config SERIAL_S3C6400 528config SERIAL_S3C6400
529 tristate "Samsung S3C6400/S3C6410 Serial port support" 529 tristate "Samsung S3C6400/S3C6410/S5P6440 Serial port support"
530 depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410) 530 depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440)
531 default y 531 default y
532 help 532 help
533 Serial port support for the Samsung S3C6400 and S3C6410 533 Serial port support for the Samsung S3C6400, S3C6410 and S5P6440
534 SoCs 534 SoCs
535 535
536config SERIAL_S5PC100 536config SERIAL_S5PC100
@@ -540,6 +540,13 @@ config SERIAL_S5PC100
540 help 540 help
541 Serial port support for the Samsung S5PC100 SoCs 541 Serial port support for the Samsung S5PC100 SoCs
542 542
543config SERIAL_S5PV210
544 tristate "Samsung S5PV210 Serial port support"
545 depends on SERIAL_SAMSUNG && CPU_S5PV210
546 default y
547 help
548 Serial port support for Samsung's S5P Family of SoC's
549
543config SERIAL_MAX3100 550config SERIAL_MAX3100
544 tristate "MAX3100 support" 551 tristate "MAX3100 support"
545 depends on SPI 552 depends on SPI
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 5548fe7df61d..6aa4723b74ee 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
45obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o 45obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
46obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o 46obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
47obj-$(CONFIG_SERIAL_S5PC100) += s3c6400.o 47obj-$(CONFIG_SERIAL_S5PC100) += s3c6400.o
48obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
48obj-$(CONFIG_SERIAL_MAX3100) += max3100.o 49obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
49obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o 50obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
50obj-$(CONFIG_SERIAL_MUX) += mux.o 51obj-$(CONFIG_SERIAL_MUX) += mux.o
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index c5b546a98520..b801f5ab2b7b 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1088,7 +1088,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
1088 int *parity, int *bits) 1088 int *parity, int *bits)
1089{ 1089{
1090 1090
1091 if ( readl(sport->port.membase + UCR1) | UCR1_UARTEN ) { 1091 if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
1092 /* ok, the port was enabled */ 1092 /* ok, the port was enabled */
1093 unsigned int ucr2, ubir,ubmr, uartclk; 1093 unsigned int ucr2, ubir,ubmr, uartclk;
1094 unsigned int baud_raw; 1094 unsigned int baud_raw;
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 0700cd10b97c..683e66f18e8c 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -411,6 +411,17 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap)
411 goto ack_tx_int; 411 goto ack_tx_int;
412 } 412 }
413 413
414 /* Under some circumstances, we see interrupts reported for
415 * a closed channel. The interrupt mask in R1 is clear, but
416 * R3 still signals the interrupts and we see them when taking
417 * an interrupt for the other channel (this could be a qemu
 418 * bug but since the ESCC doc doesn't specify precisely whether
 419 * R3 interrupt status bits are masked by R1 interrupt enable
420 * bits, better safe than sorry). --BenH.
421 */
422 if (!ZS_IS_OPEN(uap))
423 goto ack_tx_int;
424
414 if (uap->port.x_char) { 425 if (uap->port.x_char) {
415 uap->flags |= PMACZILOG_FLAG_TX_ACTIVE; 426 uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
416 write_zsdata(uap, uap->port.x_char); 427 write_zsdata(uap, uap->port.x_char);
diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c
new file mode 100644
index 000000000000..8dc03837617b
--- /dev/null
+++ b/drivers/serial/s5pv210.c
@@ -0,0 +1,154 @@
1/* linux/drivers/serial/s5pv210.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Based on drivers/serial/s3c6400.c
7 *
8 * Driver for Samsung S5PV210 SoC UARTs.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#include <linux/module.h>
16#include <linux/ioport.h>
17#include <linux/io.h>
18#include <linux/platform_device.h>
19#include <linux/init.h>
20#include <linux/serial_core.h>
21#include <linux/serial.h>
22
23#include <asm/irq.h>
24#include <mach/hardware.h>
25#include <plat/regs-serial.h>
26#include "samsung.h"
27
28static int s5pv210_serial_setsource(struct uart_port *port,
29 struct s3c24xx_uart_clksrc *clk)
30{
31 unsigned long ucon = rd_regl(port, S3C2410_UCON);
32
33 if (strcmp(clk->name, "pclk") == 0)
34 ucon &= ~S5PV210_UCON_CLKMASK;
35 else if (strcmp(clk->name, "uclk1") == 0)
36 ucon |= S5PV210_UCON_CLKMASK;
37 else {
38 printk(KERN_ERR "unknown clock source %s\n", clk->name);
39 return -EINVAL;
40 }
41
42 wr_regl(port, S3C2410_UCON, ucon);
43 return 0;
44}
45
46
47static int s5pv210_serial_getsource(struct uart_port *port,
48 struct s3c24xx_uart_clksrc *clk)
49{
50 u32 ucon = rd_regl(port, S3C2410_UCON);
51
52 clk->divisor = 1;
53
54 switch (ucon & S5PV210_UCON_CLKMASK) {
55 case S5PV210_UCON_PCLK:
56 clk->name = "pclk";
57 break;
58 case S5PV210_UCON_UCLK:
59 clk->name = "uclk1";
60 break;
61 }
62
63 return 0;
64}
65
66static int s5pv210_serial_resetport(struct uart_port *port,
67 struct s3c2410_uartcfg *cfg)
68{
69 unsigned long ucon = rd_regl(port, S3C2410_UCON);
70
71 ucon &= S5PV210_UCON_CLKMASK;
72 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
73 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
74
75 /* reset both fifos */
76 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
77 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
78
79 return 0;
80}
81
82#define S5PV210_UART_DEFAULT_INFO(fifo_size) \
83 .name = "Samsung S5PV210 UART0", \
84 .type = PORT_S3C6400, \
85 .fifosize = fifo_size, \
86 .has_divslot = 1, \
87 .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
88 .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
89 .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
90 .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
91 .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
92 .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
93 .get_clksrc = s5pv210_serial_getsource, \
94 .set_clksrc = s5pv210_serial_setsource, \
95 .reset_port = s5pv210_serial_resetport
96
97static struct s3c24xx_uart_info s5p_port_fifo256 = {
98 S5PV210_UART_DEFAULT_INFO(256),
99};
100
101static struct s3c24xx_uart_info s5p_port_fifo64 = {
102 S5PV210_UART_DEFAULT_INFO(64),
103};
104
105static struct s3c24xx_uart_info s5p_port_fifo16 = {
106 S5PV210_UART_DEFAULT_INFO(16),
107};
108
109static struct s3c24xx_uart_info *s5p_uart_inf[] = {
110 [0] = &s5p_port_fifo256,
111 [1] = &s5p_port_fifo64,
112 [2] = &s5p_port_fifo16,
113 [3] = &s5p_port_fifo16,
114};
115
116/* device management */
117static int s5p_serial_probe(struct platform_device *pdev)
118{
119 return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]);
120}
121
122static struct platform_driver s5p_serial_drv = {
123 .probe = s5p_serial_probe,
124 .remove = __devexit_p(s3c24xx_serial_remove),
125 .driver = {
126 .name = "s5pv210-uart",
127 .owner = THIS_MODULE,
128 },
129};
130
131static int __init s5pv210_serial_console_init(void)
132{
133 return s3c24xx_serial_initconsole(&s5p_serial_drv, s5p_uart_inf);
134}
135
136console_initcall(s5pv210_serial_console_init);
137
138static int __init s5p_serial_init(void)
139{
140 return s3c24xx_serial_init(&s5p_serial_drv, *s5p_uart_inf);
141}
142
143static void __exit s5p_serial_exit(void)
144{
145 platform_driver_unregister(&s5p_serial_drv);
146}
147
148module_init(s5p_serial_init);
149module_exit(s5p_serial_exit);
150
151MODULE_LICENSE("GPL");
152MODULE_ALIAS("platform:s5pv210-uart");
153MODULE_DESCRIPTION("Samsung S5PV210 UART Driver support");
154MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
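The probe path above selects a FIFO description purely by pdev->id, so platform code is expected to register "s5pv210-uart" devices with ids 0-3. A hypothetical registration sketch (resource details omitted, names assumed):

    /* Hypothetical machine code (sketch): id indexes s5p_uart_inf[]. */
    static struct platform_device s5pv210_uart0 = {
        .name = "s5pv210-uart",
        .id   = 0,               /* port 0: 256-byte FIFOs per the table above */
        /* .resource / .num_resources for the UART register window and IRQ */
    };

    platform_device_register(&s5pv210_uart0);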
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 52e3df113ec0..6982243736d1 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -1374,7 +1374,7 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
1374 * data. 1374 * data.
1375*/ 1375*/
1376 1376
1377static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info *info) 1377static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info **info)
1378{ 1378{
1379 struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports; 1379 struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
1380 struct platform_device **platdev_ptr; 1380 struct platform_device **platdev_ptr;
@@ -1385,7 +1385,7 @@ static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info *info)
1385 platdev_ptr = s3c24xx_uart_devs; 1385 platdev_ptr = s3c24xx_uart_devs;
1386 1386
1387 for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) { 1387 for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) {
1388 s3c24xx_serial_init_port(ptr, info, *platdev_ptr); 1388 s3c24xx_serial_init_port(ptr, info[i], *platdev_ptr);
1389 } 1389 }
1390 1390
1391 return 0; 1391 return 0;
@@ -1451,7 +1451,7 @@ static struct console s3c24xx_serial_console = {
1451}; 1451};
1452 1452
1453int s3c24xx_serial_initconsole(struct platform_driver *drv, 1453int s3c24xx_serial_initconsole(struct platform_driver *drv,
1454 struct s3c24xx_uart_info *info) 1454 struct s3c24xx_uart_info **info)
1455 1455
1456{ 1456{
1457 struct platform_device *dev = s3c24xx_uart_devs[0]; 1457 struct platform_device *dev = s3c24xx_uart_devs[0];
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
index 1fb22343df42..0ac06a07d25f 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/serial/samsung.h
@@ -75,19 +75,24 @@ extern int s3c24xx_serial_probe(struct platform_device *dev,
75extern int __devexit s3c24xx_serial_remove(struct platform_device *dev); 75extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
76 76
77extern int s3c24xx_serial_initconsole(struct platform_driver *drv, 77extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
78 struct s3c24xx_uart_info *uart); 78 struct s3c24xx_uart_info **uart);
79 79
80extern int s3c24xx_serial_init(struct platform_driver *drv, 80extern int s3c24xx_serial_init(struct platform_driver *drv,
81 struct s3c24xx_uart_info *info); 81 struct s3c24xx_uart_info *info);
82 82
83#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE 83#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
84 84
85#define s3c24xx_console_init(__drv, __inf) \ 85#define s3c24xx_console_init(__drv, __inf) \
86static int __init s3c_serial_console_init(void) \ 86static int __init s3c_serial_console_init(void) \
87{ \ 87{ \
88 return s3c24xx_serial_initconsole(__drv, __inf); \ 88 struct s3c24xx_uart_info *uinfo[CONFIG_SERIAL_SAMSUNG_UARTS]; \
89} \ 89 int i; \
90 \ 90 \
91 for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++) \
92 uinfo[i] = __inf; \
93 return s3c24xx_serial_initconsole(__drv, uinfo); \
94} \
95 \
91console_initcall(s3c_serial_console_init) 96console_initcall(s3c_serial_console_init)
92 97
93#else 98#else
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 047530b285bb..7f2830709512 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -385,13 +385,20 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
385 } 385 }
386 386
387 /* 387 /*
388 * As a last resort, if the quotient is zero, 388 * As a last resort, if the range cannot be met then clip to
389 * default to 9600 bps 389 * the nearest chip supported rate.
390 */ 390 */
391 if (!hung_up) 391 if (!hung_up) {
392 tty_termios_encode_baud_rate(termios, 9600, 9600); 392 if (baud <= min)
393 tty_termios_encode_baud_rate(termios,
394 min + 1, min + 1);
395 else
396 tty_termios_encode_baud_rate(termios,
397 max - 1, max - 1);
398 }
393 } 399 }
394 400 /* Should never happen */
401 WARN_ON(1);
395 return 0; 402 return 0;
396} 403}
397 404
@@ -2006,12 +2013,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
2006 2013
2007 mutex_lock(&port->mutex); 2014 mutex_lock(&port->mutex);
2008 2015
2009 if (!console_suspend_enabled && uart_console(uport)) {
2010 /* we're going to avoid suspending serial console */
2011 mutex_unlock(&port->mutex);
2012 return 0;
2013 }
2014
2015 tty_dev = device_find_child(uport->dev, &match, serial_match_port); 2016 tty_dev = device_find_child(uport->dev, &match, serial_match_port);
2016 if (device_may_wakeup(tty_dev)) { 2017 if (device_may_wakeup(tty_dev)) {
2017 enable_irq_wake(uport->irq); 2018 enable_irq_wake(uport->irq);
@@ -2019,20 +2020,23 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
2019 mutex_unlock(&port->mutex); 2020 mutex_unlock(&port->mutex);
2020 return 0; 2021 return 0;
2021 } 2022 }
2022 uport->suspended = 1; 2023 if (console_suspend_enabled || !uart_console(uport))
2024 uport->suspended = 1;
2023 2025
2024 if (port->flags & ASYNC_INITIALIZED) { 2026 if (port->flags & ASYNC_INITIALIZED) {
2025 const struct uart_ops *ops = uport->ops; 2027 const struct uart_ops *ops = uport->ops;
2026 int tries; 2028 int tries;
2027 2029
2028 set_bit(ASYNCB_SUSPENDED, &port->flags); 2030 if (console_suspend_enabled || !uart_console(uport)) {
2029 clear_bit(ASYNCB_INITIALIZED, &port->flags); 2031 set_bit(ASYNCB_SUSPENDED, &port->flags);
2032 clear_bit(ASYNCB_INITIALIZED, &port->flags);
2030 2033
2031 spin_lock_irq(&uport->lock); 2034 spin_lock_irq(&uport->lock);
2032 ops->stop_tx(uport); 2035 ops->stop_tx(uport);
2033 ops->set_mctrl(uport, 0); 2036 ops->set_mctrl(uport, 0);
2034 ops->stop_rx(uport); 2037 ops->stop_rx(uport);
2035 spin_unlock_irq(&uport->lock); 2038 spin_unlock_irq(&uport->lock);
2039 }
2036 2040
2037 /* 2041 /*
2038 * Wait for the transmitter to empty. 2042 * Wait for the transmitter to empty.
@@ -2047,16 +2051,18 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
2047 drv->dev_name, 2051 drv->dev_name,
2048 drv->tty_driver->name_base + uport->line); 2052 drv->tty_driver->name_base + uport->line);
2049 2053
2050 ops->shutdown(uport); 2054 if (console_suspend_enabled || !uart_console(uport))
2055 ops->shutdown(uport);
2051 } 2056 }
2052 2057
2053 /* 2058 /*
2054 * Disable the console device before suspending. 2059 * Disable the console device before suspending.
2055 */ 2060 */
2056 if (uart_console(uport)) 2061 if (console_suspend_enabled && uart_console(uport))
2057 console_stop(uport->cons); 2062 console_stop(uport->cons);
2058 2063
2059 uart_change_pm(state, 3); 2064 if (console_suspend_enabled || !uart_console(uport))
2065 uart_change_pm(state, 3);
2060 2066
2061 mutex_unlock(&port->mutex); 2067 mutex_unlock(&port->mutex);
2062 2068
@@ -2073,29 +2079,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
2073 2079
2074 mutex_lock(&port->mutex); 2080 mutex_lock(&port->mutex);
2075 2081
2076 if (!console_suspend_enabled && uart_console(uport)) {
2077 /* no need to resume serial console, it wasn't suspended */
2078 /*
2079 * First try to use the console cflag setting.
2080 */
2081 memset(&termios, 0, sizeof(struct ktermios));
2082 termios.c_cflag = uport->cons->cflag;
2083 /*
2084 * If that's unset, use the tty termios setting.
2085 */
2086 if (termios.c_cflag == 0)
2087 termios = *state->port.tty->termios;
2088 else {
2089 termios.c_ispeed = termios.c_ospeed =
2090 tty_termios_input_baud_rate(&termios);
2091 termios.c_ispeed = termios.c_ospeed =
2092 tty_termios_baud_rate(&termios);
2093 }
2094 uport->ops->set_termios(uport, &termios, NULL);
2095 mutex_unlock(&port->mutex);
2096 return 0;
2097 }
2098
2099 tty_dev = device_find_child(uport->dev, &match, serial_match_port); 2082 tty_dev = device_find_child(uport->dev, &match, serial_match_port);
2100 if (!uport->suspended && device_may_wakeup(tty_dev)) { 2083 if (!uport->suspended && device_may_wakeup(tty_dev)) {
2101 disable_irq_wake(uport->irq); 2084 disable_irq_wake(uport->irq);
@@ -2121,21 +2104,23 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
2121 spin_lock_irq(&uport->lock); 2104 spin_lock_irq(&uport->lock);
2122 ops->set_mctrl(uport, 0); 2105 ops->set_mctrl(uport, 0);
2123 spin_unlock_irq(&uport->lock); 2106 spin_unlock_irq(&uport->lock);
2124 ret = ops->startup(uport); 2107 if (console_suspend_enabled || !uart_console(uport)) {
2125 if (ret == 0) { 2108 ret = ops->startup(uport);
2126 uart_change_speed(state, NULL); 2109 if (ret == 0) {
2127 spin_lock_irq(&uport->lock); 2110 uart_change_speed(state, NULL);
2128 ops->set_mctrl(uport, uport->mctrl); 2111 spin_lock_irq(&uport->lock);
2129 ops->start_tx(uport); 2112 ops->set_mctrl(uport, uport->mctrl);
2130 spin_unlock_irq(&uport->lock); 2113 ops->start_tx(uport);
2131 set_bit(ASYNCB_INITIALIZED, &port->flags); 2114 spin_unlock_irq(&uport->lock);
2132 } else { 2115 set_bit(ASYNCB_INITIALIZED, &port->flags);
2133 /* 2116 } else {
2134 * Failed to resume - maybe hardware went away? 2117 /*
2135 * Clear the "initialized" flag so we won't try 2118 * Failed to resume - maybe hardware went away?
2136 * to call the low level drivers shutdown method. 2119 * Clear the "initialized" flag so we won't try
2137 */ 2120 * to call the low level drivers shutdown method.
2138 uart_shutdown(state); 2121 */
2122 uart_shutdown(state);
2123 }
2139 } 2124 }
2140 2125
2141 clear_bit(ASYNCB_SUSPENDED, &port->flags); 2126 clear_bit(ASYNCB_SUSPENDED, &port->flags);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index fc413f0f8dd2..95421fa3b304 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -146,7 +146,8 @@ static void quirk_wakeup_oxsemi(struct pcmcia_device *link)
146{ 146{
147 struct serial_info *info = link->priv; 147 struct serial_info *info = link->priv;
148 148
149 outb(12, info->c950ctrl + 1); 149 if (info->c950ctrl)
150 outb(12, info->c950ctrl + 1);
150} 151}
151 152
152/* request_region? oxsemi branch does no request_region too... */ 153/* request_region? oxsemi branch does no request_region too... */
@@ -757,6 +758,7 @@ static struct pcmcia_device_id serial_ids[] = {
757 PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), 758 PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
758 PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), 759 PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
759 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 760 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
761 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
760 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 762 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
761 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 763 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
762 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 764 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
@@ -819,6 +821,7 @@ static struct pcmcia_device_id serial_ids[] = {
819 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), 821 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
820 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), 822 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
821 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ 823 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
824 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */
822 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ 825 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
823 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ 826 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
824 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ 827 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
@@ -827,7 +830,7 @@ static struct pcmcia_device_id serial_ids[] = {
827 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), 830 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
828 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), 831 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
829 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), 832 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
830 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), 833 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
831 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), 834 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
832 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83), 835 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
833 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490), 836 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490),
@@ -861,6 +864,18 @@ static struct pcmcia_device_id serial_ids[] = {
861}; 864};
862MODULE_DEVICE_TABLE(pcmcia, serial_ids); 865MODULE_DEVICE_TABLE(pcmcia, serial_ids);
863 866
867MODULE_FIRMWARE("cis/PCMLM28.cis");
868MODULE_FIRMWARE("cis/DP83903.cis");
869MODULE_FIRMWARE("cis/3CCFEM556.cis");
870MODULE_FIRMWARE("cis/3CXEM556.cis");
871MODULE_FIRMWARE("cis/SW_8xx_SER.cis");
872MODULE_FIRMWARE("cis/SW_7xx_SER.cis");
873MODULE_FIRMWARE("cis/SW_555_SER.cis");
874MODULE_FIRMWARE("cis/MT5634ZLX.cis");
875MODULE_FIRMWARE("cis/COMpad2.cis");
876MODULE_FIRMWARE("cis/COMpad4.cis");
877MODULE_FIRMWARE("cis/RS-COM-2P.cis");
878
864static struct pcmcia_driver serial_cs_driver = { 879static struct pcmcia_driver serial_cs_driver = {
865 .owner = THIS_MODULE, 880 .owner = THIS_MODULE,
866 .drv = { 881 .drv = {
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 37f0de9dd9ce..42f3333c4ad0 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1052,7 +1052,18 @@ static void __devinit sci_init_single(struct platform_device *dev,
1052 sci_port->port.ops = &sci_uart_ops; 1052 sci_port->port.ops = &sci_uart_ops;
1053 sci_port->port.iotype = UPIO_MEM; 1053 sci_port->port.iotype = UPIO_MEM;
1054 sci_port->port.line = index; 1054 sci_port->port.line = index;
1055 sci_port->port.fifosize = 1; 1055
1056 switch (p->type) {
1057 case PORT_SCIFA:
1058 sci_port->port.fifosize = 64;
1059 break;
1060 case PORT_SCIF:
1061 sci_port->port.fifosize = 16;
1062 break;
1063 default:
1064 sci_port->port.fifosize = 1;
1065 break;
1066 }
1056 1067
1057 if (dev) { 1068 if (dev) {
1058 sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL; 1069 sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 377f2712289e..ab2ab3c81834 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s,
394 spin_unlock_irqrestore(&port->lock, flags); 394 spin_unlock_irqrestore(&port->lock, flags);
395} 395}
396 396
397static int __init ulite_console_setup(struct console *co, char *options) 397static int __devinit ulite_console_setup(struct console *co, char *options)
398{ 398{
399 struct uart_port *port; 399 struct uart_port *port;
400 int baud = 9600; 400 int baud = 9600;
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 51e5e1dfa6e5..30973ec16a93 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -173,15 +173,12 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
173 int edge; 173 int edge;
174 174
175 /* 175 /*
176 * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG(!) 176 * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
177 * 0 0 10 10 1 0 177 * 0 0 10 10 1 1
178 * 0 1 10 10 0 1 178 * 0 1 10 10 0 0
179 * 1 0 11 11 0 1 179 * 1 0 11 11 0 0
180 * 1 1 11 11 1 0 180 * 1 1 11 11 1 1
181 *
182 * (!) Note: REDG is inverted recommended data sheet setting
183 */ 181 */
184
185 sh_msiof_write(p, FCTR, 0); 182 sh_msiof_write(p, FCTR, 0);
186 sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); 183 sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
187 sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); 184 sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
@@ -193,7 +190,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
193 edge = cpol ? cpha : !cpha; 190 edge = cpol ? cpha : !cpha;
194 191
195 tmp |= edge << 27; /* TEDG */ 192 tmp |= edge << 27; /* TEDG */
196 tmp |= !edge << 26; /* REDG */ 193 tmp |= edge << 26; /* REDG */
197 tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */ 194 tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
198 sh_msiof_write(p, CTR, tmp); 195 sh_msiof_write(p, CTR, tmp);
199} 196}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5681ebed9c65..03dfd27c4bfb 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
494#endif 494#endif
495 break; 495 break;
496 case SSB_BUSTYPE_SDIO: 496 case SSB_BUSTYPE_SDIO:
497#ifdef CONFIG_SSB_SDIO 497#ifdef CONFIG_SSB_SDIOHOST
498 sdev->irq = bus->host_sdio->dev.irq;
499 dev->parent = &bus->host_sdio->dev; 498 dev->parent = &bus->host_sdio->dev;
500#endif 499#endif
501 break; 500 break;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 94eb86319ff3..fc2e963e65e9 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -99,8 +99,6 @@ source "drivers/staging/line6/Kconfig"
99 99
100source "drivers/gpu/drm/vmwgfx/Kconfig" 100source "drivers/gpu/drm/vmwgfx/Kconfig"
101 101
102source "drivers/gpu/drm/radeon/Kconfig"
103
104source "drivers/gpu/drm/nouveau/Kconfig" 102source "drivers/gpu/drm/nouveau/Kconfig"
105 103
106source "drivers/staging/octeon/Kconfig" 104source "drivers/staging/octeon/Kconfig"
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index f4c26572c7df..43c57b7688ab 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -194,9 +194,11 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
194{ 194{
195 struct usb_interface *intf = to_usb_interface(dev); 195 struct usb_interface *intf = to_usb_interface(dev);
196 struct asus_oled_dev *odev = usb_get_intfdata(intf); 196 struct asus_oled_dev *odev = usb_get_intfdata(intf);
197 int temp = strict_strtoul(buf, 10, NULL); 197 unsigned long value;
198 if (strict_strtoul(buf, 10, &value))
199 return -EINVAL;
198 200
199 enable_oled(odev, temp); 201 enable_oled(odev, value);
200 202
201 return count; 203 return count;
202} 204}
@@ -207,10 +209,12 @@ static ssize_t class_set_enabled(struct device *device,
207{ 209{
208 struct asus_oled_dev *odev = 210 struct asus_oled_dev *odev =
209 (struct asus_oled_dev *) dev_get_drvdata(device); 211 (struct asus_oled_dev *) dev_get_drvdata(device);
212 unsigned long value;
210 213
211 int temp = strict_strtoul(buf, 10, NULL); 214 if (strict_strtoul(buf, 10, &value))
215 return -EINVAL;
212 216
213 enable_oled(odev, temp); 217 enable_oled(odev, value);
214 218
215 return count; 219 return count;
216} 220}
diff --git a/drivers/staging/cx25821/cx25821-medusa-video.c b/drivers/staging/cx25821/cx25821-medusa-video.c
index e4df8134f059..1eb079b3d429 100644
--- a/drivers/staging/cx25821/cx25821-medusa-video.c
+++ b/drivers/staging/cx25821/cx25821-medusa-video.c
@@ -860,10 +860,8 @@ int medusa_video_init(struct cx25821_dev *dev)
860 860
861 ret_val = medusa_set_videostandard(dev); 861 ret_val = medusa_set_videostandard(dev);
862 862
863 if (ret_val < 0) { 863 if (ret_val < 0)
864 mutex_unlock(&dev->lock);
865 return -EINVAL; 864 return -EINVAL;
866 }
867 865
868 return 1; 866 return 1;
869} 867}
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h
index 6da843cc343c..e715e4dcb523 100644
--- a/drivers/staging/et131x/et1310_address_map.h
+++ b/drivers/staging/et131x/et1310_address_map.h
@@ -203,11 +203,14 @@ typedef struct _GLOBAL_t { /* Location: */
203 * 9-0: pr ndes 203 * 9-0: pr ndes
204 */ 204 */
205 205
206#define ET_DMA10_MASK 0x3FF /* 10 bit mask for DMA10W types */ 206#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */
207#define ET_DMA10_WRAP 0x400 207#define ET_DMA12_WRAP 0x1000
208#define ET_DMA4_MASK 0x00F /* 4 bit mask for DMA4W types */ 208#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */
209#define ET_DMA4_WRAP 0x010 209#define ET_DMA10_WRAP 0x0400
210 210#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */
211#define ET_DMA4_WRAP 0x0010
212
213#define INDEX12(x) ((x) & ET_DMA12_MASK)
211#define INDEX10(x) ((x) & ET_DMA10_MASK) 214#define INDEX10(x) ((x) & ET_DMA10_MASK)
212#define INDEX4(x) ((x) & ET_DMA4_MASK) 215#define INDEX4(x) ((x) & ET_DMA4_MASK)
213 216
@@ -216,6 +219,11 @@ extern inline void add_10bit(u32 *v, int n)
216 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); 219 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
217} 220}
218 221
222extern inline void add_12bit(u32 *v, int n)
223{
224 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
225}
226
219/* 227/*
220 * 10bit DMA with wrap 228 * 10bit DMA with wrap
221 * txdma tx queue write address reg in txdma address map at 0x1010 229 * txdma tx queue write address reg in txdma address map at 0x1010
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
index 3ddc9b12b8db..81c1a7478ad6 100644
--- a/drivers/staging/et131x/et1310_rx.c
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -831,10 +831,10 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
831 831
832 /* Indicate that we have used this PSR entry. */ 832 /* Indicate that we have used this PSR entry. */
833 /* FIXME wrap 12 */ 833 /* FIXME wrap 12 */
834 rx_local->local_psr_full = (rx_local->local_psr_full + 1) & 0xFFF; 834 add_12bit(&rx_local->local_psr_full, 1);
835 if (rx_local->local_psr_full > rx_local->PsrNumEntries - 1) { 835 if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) {
836 /* Clear psr full and toggle the wrap bit */ 836 /* Clear psr full and toggle the wrap bit */
837 rx_local->local_psr_full &= 0xFFF; 837 rx_local->local_psr_full &= ~0xFFF;
838 rx_local->local_psr_full ^= 0x1000; 838 rx_local->local_psr_full ^= 0x1000;
839 } 839 }
840 840
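Both hunks implement the same ring-index convention: the low 12 bits hold the position and bit 12 is a wrap flag, so add_12bit() advances the index while leaving the caller's wrap bit untouched, and et1310_rx.c toggles that bit itself once the index passes the number of ring entries. A small worked example:

    u32 idx = 0x0FFE;      /* position 4094, wrap bit (0x1000) clear   */
    add_12bit(&idx, 1);    /* -> 0x0FFF: position 4095, wrap bit kept  */
    add_12bit(&idx, 1);    /* -> 0x0000: position wraps, wrap bit kept */
    /* the caller compares (idx & 0xFFF) against its entry count and   */
    /* toggles the 0x1000 wrap bit when the hardware ring rolls over   */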
diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
index c5b6613f2f2f..c2809f2a2ce0 100644
--- a/drivers/staging/hv/Hv.c
+++ b/drivers/staging/hv/Hv.c
@@ -386,7 +386,7 @@ u16 HvSignalEvent(void)
386 * retrieve the initialized message and event pages. Otherwise, we create and 386 * retrieve the initialized message and event pages. Otherwise, we create and
387 * initialize the message and event pages. 387 * initialize the message and event pages.
388 */ 388 */
389int HvSynicInit(u32 irqVector) 389void HvSynicInit(void *irqarg)
390{ 390{
391 u64 version; 391 u64 version;
392 union hv_synic_simp simp; 392 union hv_synic_simp simp;
@@ -394,13 +394,14 @@ int HvSynicInit(u32 irqVector)
394 union hv_synic_sint sharedSint; 394 union hv_synic_sint sharedSint;
395 union hv_synic_scontrol sctrl; 395 union hv_synic_scontrol sctrl;
396 u64 guestID; 396 u64 guestID;
397 int ret = 0; 397 u32 irqVector = *((u32 *)(irqarg));
398 int cpu = smp_processor_id();
398 399
399 DPRINT_ENTER(VMBUS); 400 DPRINT_ENTER(VMBUS);
400 401
401 if (!gHvContext.HypercallPage) { 402 if (!gHvContext.HypercallPage) {
402 DPRINT_EXIT(VMBUS); 403 DPRINT_EXIT(VMBUS);
403 return ret; 404 return;
404 } 405 }
405 406
406 /* Check the version */ 407 /* Check the version */
@@ -425,27 +426,27 @@ int HvSynicInit(u32 irqVector)
425 */ 426 */
426 rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID); 427 rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID);
427 if (guestID == HV_LINUX_GUEST_ID) { 428 if (guestID == HV_LINUX_GUEST_ID) {
428 gHvContext.synICMessagePage[0] = 429 gHvContext.synICMessagePage[cpu] =
429 phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT); 430 phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT);
430 gHvContext.synICEventPage[0] = 431 gHvContext.synICEventPage[cpu] =
431 phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT); 432 phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT);
432 } else { 433 } else {
433 DPRINT_ERR(VMBUS, "unknown guest id!!"); 434 DPRINT_ERR(VMBUS, "unknown guest id!!");
434 goto Cleanup; 435 goto Cleanup;
435 } 436 }
436 DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p", 437 DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p",
437 gHvContext.synICMessagePage[0], 438 gHvContext.synICMessagePage[cpu],
438 gHvContext.synICEventPage[0]); 439 gHvContext.synICEventPage[cpu]);
439 } else { 440 } else {
440 gHvContext.synICMessagePage[0] = osd_PageAlloc(1); 441 gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
441 if (gHvContext.synICMessagePage[0] == NULL) { 442 if (gHvContext.synICMessagePage[cpu] == NULL) {
442 DPRINT_ERR(VMBUS, 443 DPRINT_ERR(VMBUS,
443 "unable to allocate SYNIC message page!!"); 444 "unable to allocate SYNIC message page!!");
444 goto Cleanup; 445 goto Cleanup;
445 } 446 }
446 447
447 gHvContext.synICEventPage[0] = osd_PageAlloc(1); 448 gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
448 if (gHvContext.synICEventPage[0] == NULL) { 449 if (gHvContext.synICEventPage[cpu] == NULL) {
449 DPRINT_ERR(VMBUS, 450 DPRINT_ERR(VMBUS,
450 "unable to allocate SYNIC event page!!"); 451 "unable to allocate SYNIC event page!!");
451 goto Cleanup; 452 goto Cleanup;
@@ -454,7 +455,7 @@ int HvSynicInit(u32 irqVector)
454 /* Setup the Synic's message page */ 455 /* Setup the Synic's message page */
455 rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64); 456 rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
456 simp.SimpEnabled = 1; 457 simp.SimpEnabled = 1;
457 simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0]) 458 simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
458 >> PAGE_SHIFT; 459 >> PAGE_SHIFT;
459 460
460 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx", 461 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx",
@@ -465,7 +466,7 @@ int HvSynicInit(u32 irqVector)
465 /* Setup the Synic's event page */ 466 /* Setup the Synic's event page */
466 rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64); 467 rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
467 siefp.SiefpEnabled = 1; 468 siefp.SiefpEnabled = 1;
468 siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0]) 469 siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
469 >> PAGE_SHIFT; 470 >> PAGE_SHIFT;
470 471
471 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx", 472 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx",
@@ -501,32 +502,30 @@ int HvSynicInit(u32 irqVector)
501 502
502 DPRINT_EXIT(VMBUS); 503 DPRINT_EXIT(VMBUS);
503 504
504 return ret; 505 return;
505 506
506Cleanup: 507Cleanup:
507 ret = -1;
508
509 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) { 508 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
510 if (gHvContext.synICEventPage[0]) 509 if (gHvContext.synICEventPage[cpu])
511 osd_PageFree(gHvContext.synICEventPage[0], 1); 510 osd_PageFree(gHvContext.synICEventPage[cpu], 1);
512 511
513 if (gHvContext.synICMessagePage[0]) 512 if (gHvContext.synICMessagePage[cpu])
514 osd_PageFree(gHvContext.synICMessagePage[0], 1); 513 osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
515 } 514 }
516 515
517 DPRINT_EXIT(VMBUS); 516 DPRINT_EXIT(VMBUS);
518 517 return;
519 return ret;
520} 518}
521 519
522/** 520/**
523 * HvSynicCleanup - Cleanup routine for HvSynicInit(). 521 * HvSynicCleanup - Cleanup routine for HvSynicInit().
524 */ 522 */
525void HvSynicCleanup(void) 523void HvSynicCleanup(void *arg)
526{ 524{
527 union hv_synic_sint sharedSint; 525 union hv_synic_sint sharedSint;
528 union hv_synic_simp simp; 526 union hv_synic_simp simp;
529 union hv_synic_siefp siefp; 527 union hv_synic_siefp siefp;
528 int cpu = smp_processor_id();
530 529
531 DPRINT_ENTER(VMBUS); 530 DPRINT_ENTER(VMBUS);
532 531
@@ -539,6 +538,7 @@ void HvSynicCleanup(void)
539 538
540 sharedSint.Masked = 1; 539 sharedSint.Masked = 1;
541 540
541 /* Need to correctly cleanup in the case of SMP!!! */
542 /* Disable the interrupt */ 542 /* Disable the interrupt */
543 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64); 543 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
544 544
@@ -560,8 +560,8 @@ void HvSynicCleanup(void)
560 560
561 wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64); 561 wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
562 562
563 osd_PageFree(gHvContext.synICMessagePage[0], 1); 563 osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
564 osd_PageFree(gHvContext.synICEventPage[0], 1); 564 osd_PageFree(gHvContext.synICEventPage[cpu], 1);
565 } 565 }
566 566
567 DPRINT_EXIT(VMBUS); 567 DPRINT_EXIT(VMBUS);
diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h
index 5379e4bfc56e..fce4b5cdac30 100644
--- a/drivers/staging/hv/Hv.h
+++ b/drivers/staging/hv/Hv.h
@@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = {
93 }, 93 },
94}; 94};
95 95
96#define MAX_NUM_CPUS 1 96#define MAX_NUM_CPUS 32
97 97
98 98
99struct hv_input_signal_event_buffer { 99struct hv_input_signal_event_buffer {
@@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId,
137 137
138extern u16 HvSignalEvent(void); 138extern u16 HvSignalEvent(void);
139 139
140extern int HvSynicInit(u32 irqVector); 140extern void HvSynicInit(void *irqarg);
141 141
142extern void HvSynicCleanup(void); 142extern void HvSynicCleanup(void *arg);
143 143
144#endif /* __HV_H__ */ 144#endif /* __HV_H__ */
diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c
index a4dd06f6d459..35a023e9f9d1 100644
--- a/drivers/staging/hv/Vmbus.c
+++ b/drivers/staging/hv/Vmbus.c
@@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
129 129
130 /* strcpy(dev->name, "vmbus"); */ 130 /* strcpy(dev->name, "vmbus"); */
131 /* SynIC setup... */ 131 /* SynIC setup... */
132 ret = HvSynicInit(*irqvector); 132 on_each_cpu(HvSynicInit, (void *)irqvector, 1);
133 133
134 /* Connect to VMBus in the root partition */ 134 /* Connect to VMBus in the root partition */
135 ret = VmbusConnect(); 135 ret = VmbusConnect();
@@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
150 DPRINT_ENTER(VMBUS); 150 DPRINT_ENTER(VMBUS);
151 VmbusChannelReleaseUnattachedChannels(); 151 VmbusChannelReleaseUnattachedChannels();
152 VmbusDisconnect(); 152 VmbusDisconnect();
153 HvSynicCleanup(); 153 on_each_cpu(HvSynicCleanup, NULL, 1);
154 DPRINT_EXIT(VMBUS); 154 DPRINT_EXIT(VMBUS);
155 155
156 return ret; 156 return ret;
@@ -173,7 +173,8 @@ static void VmbusOnCleanup(struct hv_driver *drv)
173 */ 173 */
174static void VmbusOnMsgDPC(struct hv_driver *drv) 174static void VmbusOnMsgDPC(struct hv_driver *drv)
175{ 175{
176 void *page_addr = gHvContext.synICMessagePage[0]; 176 int cpu = smp_processor_id();
177 void *page_addr = gHvContext.synICMessagePage[cpu];
177 struct hv_message *msg = (struct hv_message *)page_addr + 178 struct hv_message *msg = (struct hv_message *)page_addr +
178 VMBUS_MESSAGE_SINT; 179 VMBUS_MESSAGE_SINT;
179 struct hv_message *copied; 180 struct hv_message *copied;
@@ -230,11 +231,12 @@ static void VmbusOnEventDPC(struct hv_driver *drv)
230static int VmbusOnISR(struct hv_driver *drv) 231static int VmbusOnISR(struct hv_driver *drv)
231{ 232{
232 int ret = 0; 233 int ret = 0;
234 int cpu = smp_processor_id();
233 void *page_addr; 235 void *page_addr;
234 struct hv_message *msg; 236 struct hv_message *msg;
235 union hv_synic_event_flags *event; 237 union hv_synic_event_flags *event;
236 238
237 page_addr = gHvContext.synICMessagePage[0]; 239 page_addr = gHvContext.synICMessagePage[cpu];
238 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; 240 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
239 241
240 DPRINT_ENTER(VMBUS); 242 DPRINT_ENTER(VMBUS);
@@ -248,7 +250,7 @@ static int VmbusOnISR(struct hv_driver *drv)
248 } 250 }
249 251
 250 /* TODO: Check if there are events to be processed */ 252 /* TODO: Check if there are events to be processed */
251 page_addr = gHvContext.synICEventPage[0]; 253 page_addr = gHvContext.synICEventPage[cpu];
252 event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; 254 event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
253 255
254 /* Since we are a child, we only need to check bit 0 */ 256 /* Since we are a child, we only need to check bit 0 */
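The Hv.c, Hv.h and Vmbus.c hunks above convert the SynIC setup from one static slot to a per-CPU slot driven by on_each_cpu(). A minimal sketch of that pattern follows; the array and function names are stand-ins, not the real gHvContext fields:

#include <linux/smp.h>
#include <linux/types.h>

#define MAX_NUM_CPUS	32

static void *msg_page[MAX_NUM_CPUS];	/* stand-in for gHvContext.synICMessagePage[] */

/* Runs once on every online CPU; each CPU touches only its own slot,
 * indexed by smp_processor_id(). Note the void return: errors have to be
 * recorded in shared state, they cannot be returned to the caller. */
static void synic_init_on_cpu(void *irq_arg)
{
	u32 irq_vector = *(u32 *)irq_arg;
	int cpu = smp_processor_id();

	/* program this CPU's SIMP/SIEFP/SINT MSRs using msg_page[cpu]
	 * and irq_vector here */
	(void)irq_vector;
	(void)cpu;
}

static void synic_setup_all(u32 *irq_vector)
{
	on_each_cpu(synic_init_on_cpu, irq_vector, 1);	/* wait == 1: run on every CPU before returning */
}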
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 96f11715cd26..355dffcc23b0 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
494 return 0; 494 return 0;
495 /* allocate 2^1 pages = 8K (on i386); 495 /* allocate 2^1 pages = 8K (on i386);
496 * should be more than enough for one device */ 496 * should be more than enough for one device */
497 pages_start = (char *)__get_free_pages(GFP_KERNEL, 1); 497 pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
498 if (!pages_start) 498 if (!pages_start)
499 return -ENOMEM; 499 return -ENOMEM;
500 500
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6e8bcdfd23b4..a678186f218f 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
1312 void __user *addr = as->userurb; 1312 void __user *addr = as->userurb;
1313 unsigned int i; 1313 unsigned int i;
1314 1314
1315 if (as->userbuffer) 1315 if (as->userbuffer && urb->actual_length)
1316 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1316 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
1317 urb->transfer_buffer_length)) 1317 urb->actual_length))
1318 goto err_out; 1318 goto err_out;
1319 if (put_user(as->status, &userurb->status)) 1319 if (put_user(as->status, &userurb->status))
1320 goto err_out; 1320 goto err_out;
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg)
1334 } 1334 }
1335 } 1335 }
1336 1336
1337 free_async(as);
1338
1339 if (put_user(addr, (void __user * __user *)arg)) 1337 if (put_user(addr, (void __user * __user *)arg))
1340 return -EFAULT; 1338 return -EFAULT;
1341 return 0; 1339 return 0;
1342 1340
1343err_out: 1341err_out:
1344 free_async(as);
1345 return -EFAULT; 1342 return -EFAULT;
1346} 1343}
1347 1344
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps)
1371static int proc_reapurb(struct dev_state *ps, void __user *arg) 1368static int proc_reapurb(struct dev_state *ps, void __user *arg)
1372{ 1369{
1373 struct async *as = reap_as(ps); 1370 struct async *as = reap_as(ps);
1374 if (as) 1371 if (as) {
1375 return processcompl(as, (void __user * __user *)arg); 1372 int retval = processcompl(as, (void __user * __user *)arg);
1373 free_async(as);
1374 return retval;
1375 }
1376 if (signal_pending(current)) 1376 if (signal_pending(current))
1377 return -EINTR; 1377 return -EINTR;
1378 return -EIO; 1378 return -EIO;
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
1380 1380
1381static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) 1381static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
1382{ 1382{
1383 int retval;
1383 struct async *as; 1384 struct async *as;
1384 1385
1385 if (!(as = async_getcompleted(ps))) 1386 as = async_getcompleted(ps);
1386 return -EAGAIN; 1387 retval = -EAGAIN;
1387 return processcompl(as, (void __user * __user *)arg); 1388 if (as) {
1389 retval = processcompl(as, (void __user * __user *)arg);
1390 free_async(as);
1391 }
1392 return retval;
1388} 1393}
1389 1394
1390#ifdef CONFIG_COMPAT 1395#ifdef CONFIG_COMPAT
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1475 void __user *addr = as->userurb; 1480 void __user *addr = as->userurb;
1476 unsigned int i; 1481 unsigned int i;
1477 1482
1478 if (as->userbuffer) 1483 if (as->userbuffer && urb->actual_length)
1479 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1484 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
1480 urb->transfer_buffer_length)) 1485 urb->actual_length))
1481 return -EFAULT; 1486 return -EFAULT;
1482 if (put_user(as->status, &userurb->status)) 1487 if (put_user(as->status, &userurb->status))
1483 return -EFAULT; 1488 return -EFAULT;
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1497 } 1502 }
1498 } 1503 }
1499 1504
1500 free_async(as);
1501 if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) 1505 if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
1502 return -EFAULT; 1506 return -EFAULT;
1503 return 0; 1507 return 0;
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1506static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) 1510static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
1507{ 1511{
1508 struct async *as = reap_as(ps); 1512 struct async *as = reap_as(ps);
1509 if (as) 1513 if (as) {
1510 return processcompl_compat(as, (void __user * __user *)arg); 1514 int retval = processcompl_compat(as, (void __user * __user *)arg);
1515 free_async(as);
1516 return retval;
1517 }
1511 if (signal_pending(current)) 1518 if (signal_pending(current))
1512 return -EINTR; 1519 return -EINTR;
1513 return -EIO; 1520 return -EIO;
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
1515 1522
1516static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) 1523static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
1517{ 1524{
1525 int retval;
1518 struct async *as; 1526 struct async *as;
1519 1527
1520 if (!(as = async_getcompleted(ps))) 1528 retval = -EAGAIN;
1521 return -EAGAIN; 1529 as = async_getcompleted(ps);
1522 return processcompl_compat(as, (void __user * __user *)arg); 1530 if (as) {
1531 retval = processcompl_compat(as, (void __user * __user *)arg);
1532 free_async(as);
1533 }
1534 return retval;
1523} 1535}
1524 1536
1525 1537
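The devio.c hunks move free_async() out of processcompl()/processcompl_compat() and into the reap paths, so the async record is released whether the copy-out succeeds or fails, and they copy back only urb->actual_length rather than the full transfer_buffer_length. A reduced sketch of the caller-owns-the-free shape; struct async and the helpers below are stand-ins for the real usbfs code:

#include <linux/errno.h>

struct async;					/* opaque stand-in for the usbfs async record */

extern struct async *reap_one_completed(void);		/* like reap_as()/async_getcompleted() */
extern int copy_result_to_user(struct async *as);	/* like processcompl(), which no longer frees */
extern void free_async(struct async *as);

static int reap_blocking_sketch(void)
{
	struct async *as = reap_one_completed();

	if (as) {
		int retval = copy_result_to_user(as);

		free_async(as);		/* freed on success and on -EFAULT alike */
		return retval;
	}
	return -EIO;			/* nothing completed */
}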
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0495fa651225..80995ef0868c 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1684,6 +1684,24 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1684 } 1684 }
1685 } 1685 }
1686 if (cur_alt && new_alt) { 1686 if (cur_alt && new_alt) {
1687 struct usb_interface *iface = usb_ifnum_to_if(udev,
1688 cur_alt->desc.bInterfaceNumber);
1689
1690 if (iface->resetting_device) {
1691 /*
1692 * The USB core just reset the device, so the xHCI host
1693 * and the device will think alt setting 0 is installed.
1694 * However, the USB core will pass in the alternate
1695 * setting installed before the reset as cur_alt. Dig
1696 * out the alternate setting 0 structure, or the first
1697 * alternate setting if a broken device doesn't have alt
1698 * setting 0.
1699 */
1700 cur_alt = usb_altnum_to_altsetting(iface, 0);
1701 if (!cur_alt)
1702 cur_alt = &iface->altsetting[0];
1703 }
1704
1687 /* Drop all the endpoints in the current alt setting */ 1705 /* Drop all the endpoints in the current alt setting */
1688 for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) { 1706 for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) {
1689 ret = hcd->driver->drop_endpoint(hcd, udev, 1707 ret = hcd->driver->drop_endpoint(hcd, udev,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0cec6caf6e9b..35cc8b9ba1f5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3347,6 +3347,9 @@ static void hub_events(void)
3347 USB_PORT_FEAT_C_SUSPEND); 3347 USB_PORT_FEAT_C_SUSPEND);
3348 udev = hdev->children[i-1]; 3348 udev = hdev->children[i-1];
3349 if (udev) { 3349 if (udev) {
3350 /* TRSMRCY = 10 msec */
3351 msleep(10);
3352
3350 usb_lock_device(udev); 3353 usb_lock_device(udev);
3351 ret = remote_wakeup(hdev-> 3354 ret = remote_wakeup(hdev->
3352 children[i-1]); 3355 children[i-1]);
@@ -3692,19 +3695,14 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3692 usb_enable_interface(udev, intf, true); 3695 usb_enable_interface(udev, intf, true);
3693 ret = 0; 3696 ret = 0;
3694 } else { 3697 } else {
3695 /* We've just reset the device, so it will think alt 3698 /* Let the bandwidth allocation function know that this
3696 * setting 0 is installed. For usb_set_interface() to 3699 * device has been reset, and it will have to use
3697 * work properly, we need to set the current alternate 3700 * alternate setting 0 as the current alternate setting.
3698 * interface setting to 0 (or the first alt setting, if
3699 * the device doesn't have alt setting 0).
3700 */ 3701 */
3701 intf->cur_altsetting = 3702 intf->resetting_device = 1;
3702 usb_find_alt_setting(config, i, 0);
3703 if (!intf->cur_altsetting)
3704 intf->cur_altsetting =
3705 &config->intf_cache[i]->altsetting[0];
3706 ret = usb_set_interface(udev, desc->bInterfaceNumber, 3703 ret = usb_set_interface(udev, desc->bInterfaceNumber,
3707 desc->bAlternateSetting); 3704 desc->bAlternateSetting);
3705 intf->resetting_device = 0;
3708 } 3706 }
3709 if (ret < 0) { 3707 if (ret < 0) {
3710 dev_err(&udev->dev, "failed to restore interface %d " 3708 dev_err(&udev->dev, "failed to restore interface %d "
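Instead of patching intf->cur_altsetting by hand after a reset, the hub.c hunk brackets usb_set_interface() with a resetting_device flag; the usb_hcd_alloc_bandwidth() hunk earlier in this section then substitutes alt setting 0 as the current setting while the flag is set. The guard itself, as a small sketch that assumes the resetting_device bit this series adds to struct usb_interface:

#include <linux/usb.h>

static int restore_alt_setting(struct usb_device *udev,
			       struct usb_interface *intf,
			       const struct usb_interface_descriptor *desc)
{
	int ret;

	intf->resetting_device = 1;	/* tell bandwidth allocation to treat alt 0 as current */
	ret = usb_set_interface(udev, desc->bInterfaceNumber,
				desc->bAlternateSetting);
	intf->resetting_device = 0;

	return ret;
}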
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 1b994846e8e0..9bc95fec793f 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -906,11 +906,11 @@ char *usb_cache_string(struct usb_device *udev, int index)
906 if (index <= 0) 906 if (index <= 0)
907 return NULL; 907 return NULL;
908 908
909 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL); 909 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
910 if (buf) { 910 if (buf) {
911 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); 911 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
912 if (len > 0) { 912 if (len > 0) {
913 smallbuf = kmalloc(++len, GFP_KERNEL); 913 smallbuf = kmalloc(++len, GFP_NOIO);
914 if (!smallbuf) 914 if (!smallbuf)
915 return buf; 915 return buf;
916 memcpy(smallbuf, buf, len); 916 memcpy(smallbuf, buf, len);
@@ -1731,7 +1731,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
1731 if (cp) { 1731 if (cp) {
1732 nintf = cp->desc.bNumInterfaces; 1732 nintf = cp->desc.bNumInterfaces;
1733 new_interfaces = kmalloc(nintf * sizeof(*new_interfaces), 1733 new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
1734 GFP_KERNEL); 1734 GFP_NOIO);
1735 if (!new_interfaces) { 1735 if (!new_interfaces) {
1736 dev_err(&dev->dev, "Out of memory\n"); 1736 dev_err(&dev->dev, "Out of memory\n");
1737 return -ENOMEM; 1737 return -ENOMEM;
@@ -1740,7 +1740,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
1740 for (; n < nintf; ++n) { 1740 for (; n < nintf; ++n) {
1741 new_interfaces[n] = kzalloc( 1741 new_interfaces[n] = kzalloc(
1742 sizeof(struct usb_interface), 1742 sizeof(struct usb_interface),
1743 GFP_KERNEL); 1743 GFP_NOIO);
1744 if (!new_interfaces[n]) { 1744 if (!new_interfaces[n]) {
1745 dev_err(&dev->dev, "Out of memory\n"); 1745 dev_err(&dev->dev, "Out of memory\n");
1746 ret = -ENOMEM; 1746 ret = -ENOMEM;
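The GFP_KERNEL to GFP_NOIO conversions in devices.c and message.c exist because these paths can run while a mass-storage device is being reset or resumed; letting the page allocator start new I/O there can deadlock against the very device being revived. The choice in isolation, as a trivial sketch:

#include <linux/slab.h>

/* Allocation on a USB reset/resume path: GFP_NOIO forbids the allocator
 * from issuing new I/O to satisfy the request. */
static void *alloc_on_resume_path(size_t len)
{
	return kmalloc(len, GFP_NOIO);
}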
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 485edf937f25..5f3908f6e2dc 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -115,6 +115,12 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
115 case USB_SPEED_HIGH: 115 case USB_SPEED_HIGH:
116 speed = "480"; 116 speed = "480";
117 break; 117 break;
118 case USB_SPEED_VARIABLE:
119 speed = "480";
120 break;
121 case USB_SPEED_SUPER:
122 speed = "5000";
123 break;
118 default: 124 default:
119 speed = "unknown"; 125 speed = "unknown";
120 } 126 }
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 0a577d5694fd..d4f0db58a8ad 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -358,7 +358,7 @@ done:
358 * b15: bmType (0 == data) 358 * b15: bmType (0 == data)
359 */ 359 */
360 len = skb->len; 360 len = skb->len;
361 put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2)); 361 put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
362 362
363 /* add a zero-length EEM packet, if needed */ 363 /* add a zero-length EEM packet, if needed */
364 if (padlen) 364 if (padlen)
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port,
464 } 464 }
465 465
466 /* validate CRC */ 466 /* validate CRC */
467 crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
468 if (header & BIT(14)) { 467 if (header & BIT(14)) {
469 crc = get_unaligned_le32(skb->data + len 468 crc = get_unaligned_le32(skb->data + len
470 - ETH_FCS_LEN); 469 - ETH_FCS_LEN);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 429560100b10..76496f5d272c 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -29,7 +29,7 @@
29#if defined USB_ETH_RNDIS 29#if defined USB_ETH_RNDIS
30# undef USB_ETH_RNDIS 30# undef USB_ETH_RNDIS
31#endif 31#endif
32#ifdef CONFIG_USB_ETH_RNDIS 32#ifdef CONFIG_USB_G_MULTI_RNDIS
33# define USB_ETH_RNDIS y 33# define USB_ETH_RNDIS y
34#endif 34#endif
35 35
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index e6fedbd5a654..be5fb34d9602 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -65,6 +65,10 @@
65#include <mach/pxa25x-udc.h> 65#include <mach/pxa25x-udc.h>
66#endif 66#endif
67 67
68#ifdef CONFIG_ARCH_LUBBOCK
69#include <mach/lubbock.h>
70#endif
71
68#include <asm/mach/udc_pxa2xx.h> 72#include <asm/mach/udc_pxa2xx.h>
69 73
70 74
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index e220fb8091a3..8b45145b9136 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -26,6 +26,7 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/err.h>
29 30
30#include <linux/usb/ch9.h> 31#include <linux/usb/ch9.h>
31#include <linux/usb/gadget.h> 32#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 4b5dbd0127f5..5fc80a104150 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2582,6 +2582,7 @@ err:
2582 hsotg->gadget.dev.driver = NULL; 2582 hsotg->gadget.dev.driver = NULL;
2583 return ret; 2583 return ret;
2584} 2584}
2585EXPORT_SYMBOL(usb_gadget_register_driver);
2585 2586
2586int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2587int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2587{ 2588{
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 5859522d6edd..1ec3857f22e6 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -787,9 +787,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
787 787
788 /* start 20 msec resume signaling from this port, 788 /* start 20 msec resume signaling from this port,
789 * and make khubd collect PORT_STAT_C_SUSPEND to 789 * and make khubd collect PORT_STAT_C_SUSPEND to
790 * stop that signaling. 790 * stop that signaling. Use 5 ms extra for safety,
791 * like usb_port_resume() does.
791 */ 792 */
792 ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); 793 ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
793 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); 794 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
794 mod_timer(&hcd->rh_timer, ehci->reset_done[i]); 795 mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
795 } 796 }
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 2c6571c05f35..19372673bf09 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -120,9 +120,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
120 del_timer_sync(&ehci->watchdog); 120 del_timer_sync(&ehci->watchdog);
121 del_timer_sync(&ehci->iaa_watchdog); 121 del_timer_sync(&ehci->iaa_watchdog);
122 122
123 port = HCS_N_PORTS (ehci->hcs_params);
124 spin_lock_irq (&ehci->lock); 123 spin_lock_irq (&ehci->lock);
125 124
125 /* Once the controller is stopped, port resumes that are already
126 * in progress won't complete. Hence if remote wakeup is enabled
127 * for the root hub and any ports are in the middle of a resume or
128 * remote wakeup, we must fail the suspend.
129 */
130 if (hcd->self.root_hub->do_remote_wakeup) {
131 port = HCS_N_PORTS(ehci->hcs_params);
132 while (port--) {
133 if (ehci->reset_done[port] != 0) {
134 spin_unlock_irq(&ehci->lock);
135 ehci_dbg(ehci, "suspend failed because "
136 "port %d is resuming\n",
137 port + 1);
138 return -EBUSY;
139 }
140 }
141 }
142
126 /* stop schedules, clean any completed work */ 143 /* stop schedules, clean any completed work */
127 if (HC_IS_RUNNING(hcd->state)) { 144 if (HC_IS_RUNNING(hcd->state)) {
128 ehci_quiesce (ehci); 145 ehci_quiesce (ehci);
@@ -138,6 +155,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
138 */ 155 */
139 ehci->bus_suspended = 0; 156 ehci->bus_suspended = 0;
140 ehci->owned_ports = 0; 157 ehci->owned_ports = 0;
158 port = HCS_N_PORTS(ehci->hcs_params);
141 while (port--) { 159 while (port--) {
142 u32 __iomem *reg = &ehci->regs->port_status [port]; 160 u32 __iomem *reg = &ehci->regs->port_status [port];
143 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; 161 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
@@ -178,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
178 if (hostpc_reg) { 196 if (hostpc_reg) {
179 u32 t3; 197 u32 t3;
180 198
199 spin_unlock_irq(&ehci->lock);
181 msleep(5);/* 5ms for HCD enter low pwr mode */ 200 msleep(5);/* 5ms for HCD enter low pwr mode */
201 spin_lock_irq(&ehci->lock);
182 t3 = ehci_readl(ehci, hostpc_reg); 202 t3 = ehci_readl(ehci, hostpc_reg);
183 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); 203 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
184 t3 = ehci_readl(ehci, hostpc_reg); 204 t3 = ehci_readl(ehci, hostpc_reg);
@@ -886,17 +906,18 @@ static int ehci_hub_control (
886 if ((temp & PORT_PE) == 0 906 if ((temp & PORT_PE) == 0
887 || (temp & PORT_RESET) != 0) 907 || (temp & PORT_RESET) != 0)
888 goto error; 908 goto error;
889 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); 909
890 /* After above check the port must be connected. 910 /* After above check the port must be connected.
891 * Set appropriate bit thus could put phy into low power 911 * Set appropriate bit thus could put phy into low power
892 * mode if we have hostpc feature 912 * mode if we have hostpc feature
893 */ 913 */
914 temp &= ~PORT_WKCONN_E;
915 temp |= PORT_WKDISC_E | PORT_WKOC_E;
916 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
894 if (hostpc_reg) { 917 if (hostpc_reg) {
895 temp &= ~PORT_WKCONN_E; 918 spin_unlock_irqrestore(&ehci->lock, flags);
896 temp |= (PORT_WKDISC_E | PORT_WKOC_E);
897 ehci_writel(ehci, temp | PORT_SUSPEND,
898 status_reg);
899 msleep(5);/* 5ms for HCD enter low pwr mode */ 919 msleep(5);/* 5ms for HCD enter low pwr mode */
920 spin_lock_irqsave(&ehci->lock, flags);
900 temp1 = ehci_readl(ehci, hostpc_reg); 921 temp1 = ehci_readl(ehci, hostpc_reg);
901 ehci_writel(ehci, temp1 | HOSTPC_PHCD, 922 ehci_writel(ehci, temp1 | HOSTPC_PHCD,
902 hostpc_reg); 923 hostpc_reg);
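Two of the ehci-hub.c hunks release ehci->lock around the 5 ms PHY low-power delay, since msleep() must not be called with a spinlock held. The generic shape, assuming the caller entered with the lock held and interrupts disabled:

#include <linux/spinlock.h>
#include <linux/delay.h>

static void settle_phy_locked(spinlock_t *lock)
{
	spin_unlock_irq(lock);
	msleep(5);		/* sleeping is only legal with the lock dropped */
	spin_lock_irq(lock);
	/* any hardware state read before the unlock must be re-checked here */
}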
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index a427d3b00634..89521775c567 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -849,9 +849,10 @@ qh_make (
849 * But interval 1 scheduling is simpler, and 849 * But interval 1 scheduling is simpler, and
850 * includes high bandwidth. 850 * includes high bandwidth.
851 */ 851 */
852 dbg ("intr period %d uframes, NYET!", 852 urb->interval = 1;
853 urb->interval); 853 } else if (qh->period > ehci->periodic_size) {
854 goto done; 854 qh->period = ehci->periodic_size;
855 urb->interval = qh->period << 3;
855 } 856 }
856 } else { 857 } else {
857 int think_time; 858 int think_time;
@@ -874,6 +875,10 @@ qh_make (
874 usb_calc_bus_time (urb->dev->speed, 875 usb_calc_bus_time (urb->dev->speed,
875 is_input, 0, max_packet (maxp))); 876 is_input, 0, max_packet (maxp)));
876 qh->period = urb->interval; 877 qh->period = urb->interval;
878 if (qh->period > ehci->periodic_size) {
879 qh->period = ehci->periodic_size;
880 urb->interval = qh->period;
881 }
877 } 882 }
878 } 883 }
879 884
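Rather than rejecting high-speed interrupt URBs whose polling interval does not fit the periodic schedule, the ehci-q.c hunk clamps the interval to the schedule length. A sketch of that clamp with the frame-to-microframe factor called out; the names are illustrative:

/* 'period_frames' and 'periodic_size' are in frames; high-speed
 * urb->interval is in microframes, hence the << 3 when reporting the
 * clamped value back to the submitter. */
static unsigned int clamp_intr_period(unsigned int period_frames,
				      unsigned int periodic_size,
				      unsigned int *urb_interval_uframes)
{
	if (period_frames > periodic_size) {
		period_frames = periodic_size;
		*urb_interval_uframes = period_frames << 3;
	}
	return period_frames;
}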
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 0951818ef93b..78e7c3cfcb72 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -242,9 +242,10 @@ err:
242static void fhci_usb_free(void *lld) 242static void fhci_usb_free(void *lld)
243{ 243{
244 struct fhci_usb *usb = lld; 244 struct fhci_usb *usb = lld;
245 struct fhci_hcd *fhci = usb->fhci; 245 struct fhci_hcd *fhci;
246 246
247 if (usb) { 247 if (usb) {
248 fhci = usb->fhci;
248 fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF); 249 fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
249 fhci_ep0_free(usb); 250 fhci_ep0_free(usb);
250 kfree(usb->actual_frame); 251 kfree(usb->actual_frame);
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index d224ab467a40..e1232890c78b 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
105 if (ep->td_base) 105 if (ep->td_base)
106 cpm_muram_free(cpm_muram_offset(ep->td_base)); 106 cpm_muram_free(cpm_muram_offset(ep->td_base));
107 107
108 if (ep->conf_frame_Q) { 108 if (kfifo_initialized(&ep->conf_frame_Q)) {
109 size = cq_howmany(&ep->conf_frame_Q); 109 size = cq_howmany(&ep->conf_frame_Q);
110 for (; size; size--) { 110 for (; size; size--) {
111 struct packet *pkt = cq_get(&ep->conf_frame_Q); 111 struct packet *pkt = cq_get(&ep->conf_frame_Q);
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
115 cq_delete(&ep->conf_frame_Q); 115 cq_delete(&ep->conf_frame_Q);
116 } 116 }
117 117
118 if (ep->empty_frame_Q) { 118 if (kfifo_initialized(&ep->empty_frame_Q)) {
119 size = cq_howmany(&ep->empty_frame_Q); 119 size = cq_howmany(&ep->empty_frame_Q);
120 for (; size; size--) { 120 for (; size; size--) {
121 struct packet *pkt = cq_get(&ep->empty_frame_Q); 121 struct packet *pkt = cq_get(&ep->empty_frame_Q);
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
125 cq_delete(&ep->empty_frame_Q); 125 cq_delete(&ep->empty_frame_Q);
126 } 126 }
127 127
128 if (ep->dummy_packets_Q) { 128 if (kfifo_initialized(&ep->dummy_packets_Q)) {
129 size = cq_howmany(&ep->dummy_packets_Q); 129 size = cq_howmany(&ep->dummy_packets_Q);
130 for (; size; size--) { 130 for (; size; size--) {
131 u8 *buff = cq_get(&ep->dummy_packets_Q); 131 u8 *buff = cq_get(&ep->dummy_packets_Q);
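The fhci-tds.c hunks follow the kfifo rework of this kernel generation: the frame queues are now embedded struct kfifo objects instead of pointers, so "does this queue exist" is asked with kfifo_initialized() rather than a NULL test. Minimal sketch:

#include <linux/kfifo.h>

static void drain_queue_if_ready(struct kfifo *q)
{
	if (!kfifo_initialized(q))
		return;		/* kfifo_alloc()/kfifo_init() never ran for this queue */

	/* ... pop and free the queued packets here ... */
}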
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 73352f3739b5..42971657fde2 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2270,10 +2270,10 @@ static int isp1362_mem_config(struct usb_hcd *hcd)
2270 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); 2270 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2271 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", 2271 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2272 istl_size / 2, istl_size, 0, istl_size / 2); 2272 istl_size / 2, istl_size, 0, istl_size / 2);
2273 dev_info(hcd->self.controller, " INTL: %4d * (%3lu+8): %4d @ $%04x\n", 2273 dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2274 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, 2274 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2275 intl_size, istl_size); 2275 intl_size, istl_size);
2276 dev_info(hcd->self.controller, " ATL : %4d * (%3lu+8): %4d @ $%04x\n", 2276 dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2277 atl_buffers, atl_blksize - PTD_HEADER_SIZE, 2277 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2278 atl_size, istl_size + intl_size); 2278 atl_size, istl_size + intl_size);
2279 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, 2279 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
@@ -2697,6 +2697,8 @@ static int __init isp1362_probe(struct platform_device *pdev)
2697 void __iomem *data_reg; 2697 void __iomem *data_reg;
2698 int irq; 2698 int irq;
2699 int retval = 0; 2699 int retval = 0;
2700 struct resource *irq_res;
2701 unsigned int irq_flags = 0;
2700 2702
2701 /* basic sanity checks first. board-specific init logic should 2703 /* basic sanity checks first. board-specific init logic should
2702 * have initialized this the three resources and probably board 2704 * have initialized this the three resources and probably board
@@ -2710,11 +2712,12 @@ static int __init isp1362_probe(struct platform_device *pdev)
2710 2712
2711 data = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2713 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2712 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2714 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2713 irq = platform_get_irq(pdev, 0); 2715 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2714 if (!addr || !data || irq < 0) { 2716 if (!addr || !data || !irq_res) {
2715 retval = -ENODEV; 2717 retval = -ENODEV;
2716 goto err1; 2718 goto err1;
2717 } 2719 }
2720 irq = irq_res->start;
2718 2721
2719#ifdef CONFIG_USB_HCD_DMA 2722#ifdef CONFIG_USB_HCD_DMA
2720 if (pdev->dev.dma_mask) { 2723 if (pdev->dev.dma_mask) {
@@ -2781,12 +2784,16 @@ static int __init isp1362_probe(struct platform_device *pdev)
2781 } 2784 }
2782#endif 2785#endif
2783 2786
2784#ifdef CONFIG_ARM 2787 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2785 if (isp1362_hcd->board) 2788 irq_flags |= IRQF_TRIGGER_RISING;
2786 set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING); 2789 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2787#endif 2790 irq_flags |= IRQF_TRIGGER_FALLING;
2791 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2792 irq_flags |= IRQF_TRIGGER_HIGH;
2793 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2794 irq_flags |= IRQF_TRIGGER_LOW;
2788 2795
2789 retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED); 2796 retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
2790 if (retval != 0) 2797 if (retval != 0)
2791 goto err6; 2798 goto err6;
2792 pr_info("%s, irq %d\n", hcd->product_desc, irq); 2799 pr_info("%s, irq %d\n", hcd->product_desc, irq);
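The isp1362-hcd.c hunk stops hard-coding IRQF_TRIGGER_LOW (and the ARM-only set_irq_type() call) and instead derives the trigger from the platform IRQ resource. The mapping it applies, pulled out into a standalone helper:

#include <linux/ioport.h>
#include <linux/interrupt.h>

/* Translate IORESOURCE_IRQ_* trigger bits into request_irq() IRQF_* flags. */
static unsigned long irq_flags_from_resource(const struct resource *irq_res)
{
	unsigned long irq_flags = 0;

	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	return irq_flags;
}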
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 9600a58299db..27b8f7cb4471 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1039,12 +1039,12 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
1039 if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) { 1039 if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) {
1040 u32 buffstatus; 1040 u32 buffstatus;
1041 1041
1042 /* XXX 1042 /*
1043 * NAKs are handled in HW by the chip. Usually if the 1043 * NAKs are handled in HW by the chip. Usually if the
1044 * device is not able to send data fast enough. 1044 * device is not able to send data fast enough.
1045 * This did not trigger for a long time now. 1045 * This happens mostly on slower hardware.
1046 */ 1046 */
1047 printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: " 1047 printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: "
1048 "%d of %zu done: %08x cur: %08x\n", qtd, 1048 "%d of %zu done: %08x cur: %08x\n", qtd,
1049 urb, qh, PTD_XFERRED_LENGTH(dw3), 1049 urb, qh, PTD_XFERRED_LENGTH(dw3),
1050 qtd->length, done_map, 1050 qtd->length, done_map,
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index b7a661c02bcd..bee558aed427 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,9 @@
35#include <linux/usb.h> 35#include <linux/usb.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/mm.h>
38#include <linux/irq.h> 39#include <linux/irq.h>
40#include <asm/cacheflush.h>
39 41
40#include "../core/hcd.h" 42#include "../core/hcd.h"
41#include "r8a66597.h" 43#include "r8a66597.h"
@@ -216,8 +218,17 @@ static void disable_controller(struct r8a66597 *r8a66597)
216{ 218{
217 int port; 219 int port;
218 220
221 /* disable interrupts */
219 r8a66597_write(r8a66597, 0, INTENB0); 222 r8a66597_write(r8a66597, 0, INTENB0);
220 r8a66597_write(r8a66597, 0, INTSTS0); 223 r8a66597_write(r8a66597, 0, INTENB1);
224 r8a66597_write(r8a66597, 0, BRDYENB);
225 r8a66597_write(r8a66597, 0, BEMPENB);
226 r8a66597_write(r8a66597, 0, NRDYENB);
227
228 /* clear status */
229 r8a66597_write(r8a66597, 0, BRDYSTS);
230 r8a66597_write(r8a66597, 0, NRDYSTS);
231 r8a66597_write(r8a66597, 0, BEMPSTS);
221 232
222 for (port = 0; port < r8a66597->max_root_hub; port++) 233 for (port = 0; port < r8a66597->max_root_hub; port++)
223 r8a66597_disable_port(r8a66597, port); 234 r8a66597_disable_port(r8a66597, port);
@@ -811,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
811 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); 822 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
812} 823}
813 824
825static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
826 int status)
827__releases(r8a66597->lock)
828__acquires(r8a66597->lock)
829{
830 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
831 void *ptr;
832
833 for (ptr = urb->transfer_buffer;
834 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
835 ptr += PAGE_SIZE)
836 flush_dcache_page(virt_to_page(ptr));
837 }
838
839 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
840 spin_unlock(&r8a66597->lock);
841 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
842 spin_lock(&r8a66597->lock);
843}
844
814/* this function must be called with interrupt disabled */ 845/* this function must be called with interrupt disabled */
815static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) 846static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
816{ 847{
@@ -829,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
829 list_del(&td->queue); 860 list_del(&td->queue);
830 kfree(td); 861 kfree(td);
831 862
832 if (urb) { 863 if (urb)
833 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), 864 r8a66597_urb_done(r8a66597, urb, -ENODEV);
834 urb);
835 865
836 spin_unlock(&r8a66597->lock);
837 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
838 -ENODEV);
839 spin_lock(&r8a66597->lock);
840 }
841 break; 866 break;
842 } 867 }
843} 868}
@@ -997,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
997/* this function must be called with interrupt disabled */ 1022/* this function must be called with interrupt disabled */
998static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 1023static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
999 u16 syssts) 1024 u16 syssts)
1025__releases(r8a66597->lock)
1026__acquires(r8a66597->lock)
1000{ 1027{
1001 if (syssts == SE0) { 1028 if (syssts == SE0) {
1002 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1029 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1014,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1014 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); 1041 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1015 } 1042 }
1016 1043
1044 spin_unlock(&r8a66597->lock);
1017 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); 1045 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
1046 spin_lock(&r8a66597->lock);
1018} 1047}
1019 1048
1020/* this function must be called with interrupt disabled */ 1049/* this function must be called with interrupt disabled */
@@ -1274,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
1274 if (usb_pipeisoc(urb->pipe)) 1303 if (usb_pipeisoc(urb->pipe))
1275 urb->start_frame = r8a66597_get_frame(hcd); 1304 urb->start_frame = r8a66597_get_frame(hcd);
1276 1305
1277 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); 1306 r8a66597_urb_done(r8a66597, urb, status);
1278 spin_unlock(&r8a66597->lock);
1279 usb_hcd_giveback_urb(hcd, urb, status);
1280 spin_lock(&r8a66597->lock);
1281 } 1307 }
1282 1308
1283 if (restart) { 1309 if (restart) {
@@ -2466,6 +2492,12 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
2466 r8a66597->rh_timer.data = (unsigned long)r8a66597; 2492 r8a66597->rh_timer.data = (unsigned long)r8a66597;
2467 r8a66597->reg = (unsigned long)reg; 2493 r8a66597->reg = (unsigned long)reg;
2468 2494
2495 /* make sure no interrupts are pending */
2496 ret = r8a66597_clock_enable(r8a66597);
2497 if (ret < 0)
2498 goto clean_up3;
2499 disable_controller(r8a66597);
2500
2469 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) { 2501 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
2470 INIT_LIST_HEAD(&r8a66597->pipe_queue[i]); 2502 INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
2471 init_timer(&r8a66597->td_timer[i]); 2503 init_timer(&r8a66597->td_timer[i]);
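The new r8a66597_urb_done() helper flushes the D-cache page by page over PIO-filled IN buffers before giving the URB back, so callers on aliasing/VIPT caches do not read stale lines. The cache-flush loop on its own, buffer handling only and none of the HCD plumbing:

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Flush every page backing a lowmem kernel buffer that the controller
 * (or a PIO copy loop) has just written into. */
static void flush_buffer_dcache(void *buf, size_t len)
{
	void *ptr;

	for (ptr = buf; ptr < buf + len; ptr += PAGE_SIZE)
		flush_dcache_page(virt_to_page(ptr));
}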
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 5cd0e48f67fb..99cd00fd3514 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -749,7 +749,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
749 spin_lock_irq(&uhci->lock); 749 spin_lock_irq(&uhci->lock);
750 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) 750 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
751 rc = -ESHUTDOWN; 751 rc = -ESHUTDOWN;
752 else if (!uhci->dead) 752 else if (uhci->dead)
753 ; /* Dead controllers tell no tales */
754
755 /* Once the controller is stopped, port resumes that are already
756 * in progress won't complete. Hence if remote wakeup is enabled
757 * for the root hub and any ports are in the middle of a resume or
758 * remote wakeup, we must fail the suspend.
759 */
760 else if (hcd->self.root_hub->do_remote_wakeup &&
761 uhci->resuming_ports) {
762 dev_dbg(uhci_dev(uhci), "suspend failed because a port "
763 "is resuming\n");
764 rc = -EBUSY;
765 } else
753 suspend_rh(uhci, UHCI_RH_SUSPENDED); 766 suspend_rh(uhci, UHCI_RH_SUSPENDED);
754 spin_unlock_irq(&uhci->lock); 767 spin_unlock_irq(&uhci->lock);
755 return rc; 768 return rc;
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 885b585360b9..8270055848ca 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
167 /* Port received a wakeup request */ 167 /* Port received a wakeup request */
168 set_bit(port, &uhci->resuming_ports); 168 set_bit(port, &uhci->resuming_ports);
169 uhci->ports_timeout = jiffies + 169 uhci->ports_timeout = jiffies +
170 msecs_to_jiffies(20); 170 msecs_to_jiffies(25);
171 171
172 /* Make sure we see the port again 172 /* Make sure we see the port again
173 * after the resuming period is over. */ 173 * after the resuming period is over. */
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 0025847743f3..8b37a4b9839e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = {
3245 { USB_DEVICE(0x0711, 0x0902) }, 3245 { USB_DEVICE(0x0711, 0x0902) },
3246 { USB_DEVICE(0x0711, 0x0903) }, 3246 { USB_DEVICE(0x0711, 0x0903) },
3247 { USB_DEVICE(0x0711, 0x0918) }, 3247 { USB_DEVICE(0x0711, 0x0918) },
3248 { USB_DEVICE(0x0711, 0x0920) },
3248 { USB_DEVICE(0x182d, 0x021c) }, 3249 { USB_DEVICE(0x182d, 0x021c) },
3249 { USB_DEVICE(0x182d, 0x0269) }, 3250 { USB_DEVICE(0x182d, 0x0269) },
3250 { } 3251 { }
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index de56b3d743d7..3d2d3e549bd1 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -44,6 +44,7 @@ config ISP1301_OMAP
44config USB_ULPI 44config USB_ULPI
45 bool "Generic ULPI Transceiver Driver" 45 bool "Generic ULPI Transceiver Driver"
46 depends on ARM 46 depends on ARM
47 select USB_OTG_UTILS
47 help 48 help
48 Enable this to support ULPI connected USB OTG transceivers which 49 Enable this to support ULPI connected USB OTG transceivers which
49 are likely found on embedded boards. 50 are likely found on embedded boards.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 216f187582ab..7638828e7317 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -50,7 +50,7 @@
50 * Version Information 50 * Version Information
51 */ 51 */
52#define DRIVER_VERSION "v1.5.0" 52#define DRIVER_VERSION "v1.5.0"
53#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" 53#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
54#define DRIVER_DESC "USB FTDI Serial Converters Driver" 54#define DRIVER_DESC "USB FTDI Serial Converters Driver"
55 55
56static int debug; 56static int debug;
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
145 145
146 146
147 147
148/*
149 * Device ID not listed? Test via module params product/vendor or
150 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
151 */
148static struct usb_device_id id_table_combined [] = { 152static struct usb_device_id id_table_combined [] = {
149 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 153 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
150 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 154 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
151 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 155 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
152 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, 157 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
153 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, 158 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, 159 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = {
552 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, 557 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
553 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, 558 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
554 /* 559 /*
555 * Due to many user requests for multiple ELV devices we enable 560 * ELV devices:
556 * them by default.
557 */ 561 */
562 { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
563 { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
564 { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
565 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
566 { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
567 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
568 { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
569 { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
558 { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, 570 { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
559 { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, 571 { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
560 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, 572 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = {
571 { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, 583 { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
572 { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, 584 { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
573 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, 585 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
586 { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
574 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, 587 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
588 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
575 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, 589 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
576 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, 590 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
577 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, 591 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
578 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, 592 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
593 { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
594 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
595 { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
596 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
579 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 597 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
580 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 598 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
581 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, 599 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = {
697 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 715 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
698 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 716 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
699 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, 717 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
718 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
700 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, 719 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
701 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 720 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
702 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, 721 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index da92b4952ffb..c8951aeed983 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,8 @@
38/* www.candapter.com Ewert Energy Systems CANdapter device */ 38/* www.candapter.com Ewert Energy Systems CANdapter device */
39#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ 39#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
40 40
41#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
42
41/* OOCDlink by Joern Kaipf <joernk@web.de> 43/* OOCDlink by Joern Kaipf <joernk@web.de>
42 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ 44 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
43#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ 45#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
@@ -161,22 +163,37 @@
161/* 163/*
162 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 164 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
163 * All of these devices use FTDI's vendor ID (0x0403). 165 * All of these devices use FTDI's vendor ID (0x0403).
166 * Further IDs taken from ELV Windows .inf file.
164 * 167 *
165 * The previously included PID for the UO 100 module was incorrect. 168 * The previously included PID for the UO 100 module was incorrect.
166 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). 169 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
167 * 170 *
168 * Armin Laeuger originally sent the PID for the UM 100 module. 171 * Armin Laeuger originally sent the PID for the UM 100 module.
169 */ 172 */
173#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
174#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
175#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
176#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */
177#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */
178#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */
179#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */
180#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */
170#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ 181#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
171#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ 182#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
172#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ 183#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
184#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
185#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
186#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
187#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
173#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ 188#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
174#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ 189#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
175#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ 190#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
176#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ 191#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
177#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ 192#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
178#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ 193#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
194#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */
179#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ 195#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
196#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */
180#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ 197#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
181#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ 198#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
182#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ 199#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
@@ -968,6 +985,7 @@
968#define PAPOUCH_VID 0x5050 /* Vendor ID */ 985#define PAPOUCH_VID 0x5050 /* Vendor ID */
969#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ 986#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
970#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ 987#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
988#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
971 989
972/* 990/*
973 * Marvell SheevaPlug 991 * Marvell SheevaPlug
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index f1ea3a33b6e6..83443d6306d6 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -386,12 +386,12 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
386 386
387 dbg("%s - port %d", __func__, port->number); 387 dbg("%s - port %d", __func__, port->number);
388 388
389 if (serial->type->max_in_flight_urbs) { 389 spin_lock_irqsave(&port->lock, flags);
390 spin_lock_irqsave(&port->lock, flags); 390 if (serial->type->max_in_flight_urbs)
391 chars = port->tx_bytes_flight; 391 chars = port->tx_bytes_flight;
392 spin_unlock_irqrestore(&port->lock, flags); 392 else if (serial->num_bulk_out)
393 } else if (serial->num_bulk_out)
394 chars = kfifo_len(&port->write_fifo); 393 chars = kfifo_len(&port->write_fifo);
394 spin_unlock_irqrestore(&port->lock, flags);
395 395
396 dbg("%s - returns %d", __func__, chars); 396 dbg("%s - returns %d", __func__, chars);
397 return chars; 397 return chars;
@@ -489,6 +489,8 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
489 dbg("%s - port %d", __func__, port->number); 489 dbg("%s - port %d", __func__, port->number);
490 490
491 if (port->serial->type->max_in_flight_urbs) { 491 if (port->serial->type->max_in_flight_urbs) {
492 kfree(urb->transfer_buffer);
493
492 spin_lock_irqsave(&port->lock, flags); 494 spin_lock_irqsave(&port->lock, flags);
493 --port->urbs_in_flight; 495 --port->urbs_in_flight;
494 port->tx_bytes_flight -= urb->transfer_buffer_length; 496 port->tx_bytes_flight -= urb->transfer_buffer_length;
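The generic.c hunk widens the critical section so both counters, the in-flight byte count and the write FIFO length, are sampled under the same port lock instead of only one branch being locked. The resulting shape, with parameters standing in for struct usb_serial_port fields:

#include <linux/spinlock.h>
#include <linux/kfifo.h>

static int chars_in_buffer_sketch(spinlock_t *lock, int use_urb_count,
				  int tx_bytes_flight, struct kfifo *write_fifo)
{
	unsigned long flags;
	int chars;

	spin_lock_irqsave(lock, flags);
	if (use_urb_count)
		chars = tx_bytes_flight;
	else
		chars = kfifo_len(write_fifo);
	spin_unlock_irqrestore(lock, flags);

	return chars;		/* consistent snapshot of whichever counter applies */
}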
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ac1b6449fb6a..3eb6143bb646 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = {
298 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 298 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
300 }, 300 },
301 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
301 302
302 { } 303 { }
303}; 304};
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 64a0a2c27e12..49575fba3756 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
941UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, 941UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
942 "Microtech", 942 "Microtech",
943 "USB-SCSI-DB25", 943 "USB-SCSI-DB25",
944 US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, 944 US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
945 US_FL_SCM_MULT_TARG ), 945 US_FL_SCM_MULT_TARG ),
946 946
947UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, 947UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
@@ -1807,13 +1807,6 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
1807 US_SC_DEVICE, US_PR_DEVICE, NULL, 1807 US_SC_DEVICE, US_PR_DEVICE, NULL,
1808 US_FL_GO_SLOW ), 1808 US_FL_GO_SLOW ),
1809 1809
1810/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
1811UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1812 "INTOVA",
1813 "Pixtreme",
1814 US_SC_DEVICE, US_PR_DEVICE, NULL,
1815 US_FL_FIX_CAPACITY ),
1816
1817/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com> 1810/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
1818 * Mio Moov 330 1811 * Mio Moov 330
1819 */ 1812 */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5a53d4f0dd11..e9f995486ec1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -434,7 +434,8 @@ static void adjust_quirks(struct us_data *us)
434 u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor); 434 u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
435 u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct); 435 u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
436 unsigned f = 0; 436 unsigned f = 0;
437 unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY | 437 unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
438 US_FL_FIX_CAPACITY |
438 US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE | 439 US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
439 US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 | 440 US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
440 US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE | 441 US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 409ca9643528..a3a7f8938175 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -139,8 +139,6 @@ static int omapbl_probe(struct platform_device *pdev)
139 if (!pdata) 139 if (!pdata)
140 return -ENXIO; 140 return -ENXIO;
141 141
142 omapbl_ops.check_fb = pdata->check_fb;
143
144 bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL); 142 bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL);
145 if (unlikely(!bl)) 143 if (unlikely(!bl))
146 return -ENOMEM; 144 return -ENOMEM;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 66358fa825f3..b4b6deceed15 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -593,7 +593,8 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
593 */ 593 */
594static int imxfb_suspend(struct platform_device *dev, pm_message_t state) 594static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
595{ 595{
596 struct imxfb_info *fbi = platform_get_drvdata(dev); 596 struct fb_info *info = platform_get_drvdata(dev);
597 struct imxfb_info *fbi = info->par;
597 598
598 pr_debug("%s\n", __func__); 599 pr_debug("%s\n", __func__);
599 600
@@ -603,7 +604,8 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
603 604
604static int imxfb_resume(struct platform_device *dev) 605static int imxfb_resume(struct platform_device *dev)
605{ 606{
606 struct imxfb_info *fbi = platform_get_drvdata(dev); 607 struct fb_info *info = platform_get_drvdata(dev);
608 struct imxfb_info *fbi = info->par;
607 609
608 pr_debug("%s\n", __func__); 610 pr_debug("%s\n", __func__);
609 611
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 054ef29be479..772ba3f45e6f 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -324,8 +324,11 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
324 unsigned long flags; 324 unsigned long flags;
325 dma_cookie_t cookie; 325 dma_cookie_t cookie;
326 326
327 dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, 327 if (mx3_fbi->txd)
328 to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); 328 dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
329 to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
330 else
331 dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
329 332
330 /* This enables the channel */ 333 /* This enables the channel */
331 if (mx3_fbi->cookie < 0) { 334 if (mx3_fbi->cookie < 0) {
@@ -646,6 +649,7 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a
646 649
647static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) 650static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
648{ 651{
652 dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
649 /* This might be board-specific */ 653 /* This might be board-specific */
650 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); 654 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
651 return; 655 return;
@@ -1486,12 +1490,12 @@ static int mx3fb_probe(struct platform_device *pdev)
1486 goto ersdc0; 1490 goto ersdc0;
1487 } 1491 }
1488 1492
1493 mx3fb->backlight_level = 255;
1494
1489 ret = init_fb_chan(mx3fb, to_idmac_chan(chan)); 1495 ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
1490 if (ret < 0) 1496 if (ret < 0)
1491 goto eisdc0; 1497 goto eisdc0;
1492 1498
1493 mx3fb->backlight_level = 255;
1494
1495 return 0; 1499 return 0;
1496 1500
1497eisdc0: 1501eisdc0:
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index c7c6455f1fa8..e192b058a688 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -189,11 +189,6 @@ static struct {
189 struct omapfb_color_key color_key; 189 struct omapfb_color_key color_key;
190} dispc; 190} dispc;
191 191
192static struct platform_device omapdss_device = {
193 .name = "omapdss",
194 .id = -1,
195};
196
197static void enable_lcd_clocks(int enable); 192static void enable_lcd_clocks(int enable);
198 193
199static void inline dispc_write_reg(int idx, u32 val) 194static void inline dispc_write_reg(int idx, u32 val)
@@ -920,20 +915,20 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
920 915
921static int get_dss_clocks(void) 916static int get_dss_clocks(void)
922{ 917{
923 dispc.dss_ick = clk_get(&omapdss_device.dev, "ick"); 918 dispc.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick");
924 if (IS_ERR(dispc.dss_ick)) { 919 if (IS_ERR(dispc.dss_ick)) {
925 dev_err(dispc.fbdev->dev, "can't get ick\n"); 920 dev_err(dispc.fbdev->dev, "can't get ick\n");
926 return PTR_ERR(dispc.dss_ick); 921 return PTR_ERR(dispc.dss_ick);
927 } 922 }
928 923
929 dispc.dss1_fck = clk_get(&omapdss_device.dev, "dss1_fck"); 924 dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck");
930 if (IS_ERR(dispc.dss1_fck)) { 925 if (IS_ERR(dispc.dss1_fck)) {
931 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 926 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
932 clk_put(dispc.dss_ick); 927 clk_put(dispc.dss_ick);
933 return PTR_ERR(dispc.dss1_fck); 928 return PTR_ERR(dispc.dss1_fck);
934 } 929 }
935 930
936 dispc.dss_54m_fck = clk_get(&omapdss_device.dev, "tv_fck"); 931 dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_fck");
937 if (IS_ERR(dispc.dss_54m_fck)) { 932 if (IS_ERR(dispc.dss_54m_fck)) {
938 dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); 933 dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
939 clk_put(dispc.dss_ick); 934 clk_put(dispc.dss_ick);
@@ -1385,12 +1380,6 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
1385 int skip_init = 0; 1380 int skip_init = 0;
1386 int i; 1381 int i;
1387 1382
1388 r = platform_device_register(&omapdss_device);
1389 if (r) {
1390 dev_err(fbdev->dev, "can't register omapdss device\n");
1391 return r;
1392 }
1393
1394 memset(&dispc, 0, sizeof(dispc)); 1383 memset(&dispc, 0, sizeof(dispc));
1395 1384
1396 dispc.base = ioremap(DISPC_BASE, SZ_1K); 1385 dispc.base = ioremap(DISPC_BASE, SZ_1K);
@@ -1534,7 +1523,6 @@ static void omap_dispc_cleanup(void)
1534 free_irq(INT_24XX_DSS_IRQ, dispc.fbdev); 1523 free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
1535 put_dss_clocks(); 1524 put_dss_clocks();
1536 iounmap(dispc.base); 1525 iounmap(dispc.base);
1537 platform_device_unregister(&omapdss_device);
1538} 1526}
1539 1527
1540const struct lcd_ctrl omap2_int_ctrl = { 1528const struct lcd_ctrl omap2_int_ctrl = {
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index a9007c5d1fad..4802419da83b 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -115,12 +115,12 @@ struct platform_driver htcherald_panel_driver = {
115 }, 115 },
116}; 116};
117 117
118static int htcherald_panel_drv_init(void) 118static int __init htcherald_panel_drv_init(void)
119{ 119{
120 return platform_driver_register(&htcherald_panel_driver); 120 return platform_driver_register(&htcherald_panel_driver);
121} 121}
122 122
123static void htcherald_panel_drv_cleanup(void) 123static void __exit htcherald_panel_drv_cleanup(void)
124{ 124{
125 platform_driver_unregister(&htcherald_panel_driver); 125 platform_driver_unregister(&htcherald_panel_driver);
126} 126}
diff --git a/drivers/video/omap/omapfb.h b/drivers/video/omap/omapfb.h
index 46e4714014e8..af3c9e571ec3 100644
--- a/drivers/video/omap/omapfb.h
+++ b/drivers/video/omap/omapfb.h
@@ -203,6 +203,8 @@ struct omapfb_device {
203 203
204 struct omapfb_mem_desc mem_desc; 204 struct omapfb_mem_desc mem_desc;
205 struct fb_info *fb_info[OMAPFB_PLANE_NUM]; 205 struct fb_info *fb_info[OMAPFB_PLANE_NUM];
206
207 struct platform_device *dssdev; /* dummy dev for clocks */
206}; 208};
207 209
208#ifdef CONFIG_ARCH_OMAP1 210#ifdef CONFIG_ARCH_OMAP1
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index c7f59a5ccdbc..2c4f470fa086 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -83,6 +83,19 @@ static struct caps_table_struct color_caps[] = {
83 { 1 << OMAPFB_COLOR_YUY422, "YUY422", }, 83 { 1 << OMAPFB_COLOR_YUY422, "YUY422", },
84}; 84};
85 85
86static void omapdss_release(struct device *dev)
87{
88}
89
90/* dummy device for clocks */
91static struct platform_device omapdss_device = {
92 .name = "omapdss",
93 .id = -1,
94 .dev = {
95 .release = omapdss_release,
96 },
97};
98
86/* 99/*
87 * --------------------------------------------------------------------------- 100 * ---------------------------------------------------------------------------
88 * LCD panel 101 * LCD panel
@@ -1700,6 +1713,7 @@ static int omapfb_do_probe(struct platform_device *pdev,
1700 1713
1701 fbdev->dev = &pdev->dev; 1714 fbdev->dev = &pdev->dev;
1702 fbdev->panel = panel; 1715 fbdev->panel = panel;
1716 fbdev->dssdev = &omapdss_device;
1703 platform_set_drvdata(pdev, fbdev); 1717 platform_set_drvdata(pdev, fbdev);
1704 1718
1705 mutex_init(&fbdev->rqueue_mutex); 1719 mutex_init(&fbdev->rqueue_mutex);
@@ -1814,8 +1828,16 @@ cleanup:
1814 1828
1815static int omapfb_probe(struct platform_device *pdev) 1829static int omapfb_probe(struct platform_device *pdev)
1816{ 1830{
1831 int r;
1832
1817 BUG_ON(fbdev_pdev != NULL); 1833 BUG_ON(fbdev_pdev != NULL);
1818 1834
1835 r = platform_device_register(&omapdss_device);
1836 if (r) {
1837 dev_err(&pdev->dev, "can't register omapdss device\n");
1838 return r;
1839 }
1840
1819 /* Delay actual initialization until the LCD is registered */ 1841 /* Delay actual initialization until the LCD is registered */
1820 fbdev_pdev = pdev; 1842 fbdev_pdev = pdev;
1821 if (fbdev_panel != NULL) 1843 if (fbdev_panel != NULL)
@@ -1843,6 +1865,9 @@ static int omapfb_remove(struct platform_device *pdev)
1843 fbdev->state = OMAPFB_DISABLED; 1865 fbdev->state = OMAPFB_DISABLED;
1844 omapfb_free_resources(fbdev, saved_state); 1866 omapfb_free_resources(fbdev, saved_state);
1845 1867
1868 platform_device_unregister(&omapdss_device);
1869 fbdev->dssdev = NULL;
1870
1846 return 0; 1871 return 0;
1847} 1872}
1848 1873
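
The dummy omapdss platform device that used to live in dispc.c is now registered here, in omapfb_probe(), and unregistered in omapfb_remove(); fbdev->dssdev is how dispc.c and rfbi.c reach it for their clk_get() lookups. The empty omapdss_release() is deliberate: the driver core warns when a device is unregistered without a release() callback, so a no-op hook is supplied for this statically allocated device.
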
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index fed7b1bda19c..1162603c72e5 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -83,13 +83,13 @@ static inline u32 rfbi_read_reg(int idx)
83 83
84static int rfbi_get_clocks(void) 84static int rfbi_get_clocks(void)
85{ 85{
86 rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick"); 86 rfbi.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick");
87 if (IS_ERR(rfbi.dss_ick)) { 87 if (IS_ERR(rfbi.dss_ick)) {
88 dev_err(rfbi.fbdev->dev, "can't get ick\n"); 88 dev_err(rfbi.fbdev->dev, "can't get ick\n");
89 return PTR_ERR(rfbi.dss_ick); 89 return PTR_ERR(rfbi.dss_ick);
90 } 90 }
91 91
92 rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck"); 92 rfbi.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck");
93 if (IS_ERR(rfbi.dss1_fck)) { 93 if (IS_ERR(rfbi.dss1_fck)) {
94 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); 94 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
95 clk_put(rfbi.dss_ick); 95 clk_put(rfbi.dss_ick);
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 71d8dec30635..c63ce767b277 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -25,6 +25,13 @@ config OMAP2_DSS_DEBUG_SUPPORT
25 This enables debug messages. You need to enable printing 25 This enables debug messages. You need to enable printing
26 with 'debug' module parameter. 26 with 'debug' module parameter.
27 27
28config OMAP2_DSS_COLLECT_IRQ_STATS
29 bool "Collect DSS IRQ statistics"
30 depends on OMAP2_DSS_DEBUG_SUPPORT
31 default n
32 help
33 Collect DSS IRQ statistics, printable via debugfs
34
28config OMAP2_DSS_RFBI 35config OMAP2_DSS_RFBI
29 bool "RFBI support" 36 bool "RFBI support"
30 default n 37 default n
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 29497a0c9a91..82918eec6d2e 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -124,6 +124,7 @@ static void restore_all_ctx(void)
124 dss_clk_disable_all_no_ctx(); 124 dss_clk_disable_all_no_ctx();
125} 125}
126 126
127#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
127/* CLOCKS */ 128/* CLOCKS */
128static void core_dump_clocks(struct seq_file *s) 129static void core_dump_clocks(struct seq_file *s)
129{ 130{
@@ -149,6 +150,7 @@ static void core_dump_clocks(struct seq_file *s)
149 clocks[i]->usecount); 150 clocks[i]->usecount);
150 } 151 }
151} 152}
153#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
152 154
153static int dss_get_clock(struct clk **clock, const char *clk_name) 155static int dss_get_clock(struct clk **clock, const char *clk_name)
154{ 156{
@@ -395,6 +397,14 @@ static int dss_initialize_debugfs(void)
395 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, 397 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
396 &dss_debug_dump_clocks, &dss_debug_fops); 398 &dss_debug_dump_clocks, &dss_debug_fops);
397 399
400 debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
401 &dispc_dump_irqs, &dss_debug_fops);
402
403#ifdef CONFIG_OMAP2_DSS_DSI
404 debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir,
405 &dsi_dump_irqs, &dss_debug_fops);
406#endif
407
398 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, 408 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
399 &dss_dump_regs, &dss_debug_fops); 409 &dss_dump_regs, &dss_debug_fops);
400 debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir, 410 debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 6dabf4b2f005..de8bfbac9e26 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -148,6 +148,12 @@ static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
148 DISPC_VID_ATTRIBUTES(0), 148 DISPC_VID_ATTRIBUTES(0),
149 DISPC_VID_ATTRIBUTES(1) }; 149 DISPC_VID_ATTRIBUTES(1) };
150 150
151struct dispc_irq_stats {
152 unsigned long last_reset;
153 unsigned irq_count;
154 unsigned irqs[32];
155};
156
151static struct { 157static struct {
152 void __iomem *base; 158 void __iomem *base;
153 159
@@ -160,6 +166,11 @@ static struct {
160 struct work_struct error_work; 166 struct work_struct error_work;
161 167
162 u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; 168 u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
169
170#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
171 spinlock_t irq_stats_lock;
172 struct dispc_irq_stats irq_stats;
173#endif
163} dispc; 174} dispc;
164 175
165static void _omap_dispc_set_irqs(void); 176static void _omap_dispc_set_irqs(void);
@@ -1443,7 +1454,10 @@ static unsigned long calc_fclk_five_taps(u16 width, u16 height,
1443 do_div(tmp, 2 * out_height * ppl); 1454 do_div(tmp, 2 * out_height * ppl);
1444 fclk = tmp; 1455 fclk = tmp;
1445 1456
1446 if (height > 2 * out_height && ppl != out_width) { 1457 if (height > 2 * out_height) {
1458 if (ppl == out_width)
1459 return 0;
1460
1447 tmp = pclk * (height - 2 * out_height) * out_width; 1461 tmp = pclk * (height - 2 * out_height) * out_width;
1448 do_div(tmp, 2 * out_height * (ppl - out_width)); 1462 do_div(tmp, 2 * out_height * (ppl - out_width));
1449 fclk = max(fclk, (u32) tmp); 1463 fclk = max(fclk, (u32) tmp);
@@ -1623,7 +1637,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
1623 DSSDBG("required fclk rate = %lu Hz\n", fclk); 1637 DSSDBG("required fclk rate = %lu Hz\n", fclk);
1624 DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate()); 1638 DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
1625 1639
1626 if (fclk > dispc_fclk_rate()) { 1640 if (!fclk || fclk > dispc_fclk_rate()) {
1627 DSSERR("failed to set up scaling, " 1641 DSSERR("failed to set up scaling, "
1628 "required fclk rate = %lu Hz, " 1642 "required fclk rate = %lu Hz, "
1629 "current fclk rate = %lu Hz\n", 1643 "current fclk rate = %lu Hz\n",
@@ -2247,6 +2261,50 @@ void dispc_dump_clocks(struct seq_file *s)
2247 enable_clocks(0); 2261 enable_clocks(0);
2248} 2262}
2249 2263
2264#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
2265void dispc_dump_irqs(struct seq_file *s)
2266{
2267 unsigned long flags;
2268 struct dispc_irq_stats stats;
2269
2270 spin_lock_irqsave(&dispc.irq_stats_lock, flags);
2271
2272 stats = dispc.irq_stats;
2273 memset(&dispc.irq_stats, 0, sizeof(dispc.irq_stats));
2274 dispc.irq_stats.last_reset = jiffies;
2275
2276 spin_unlock_irqrestore(&dispc.irq_stats_lock, flags);
2277
2278 seq_printf(s, "period %u ms\n",
2279 jiffies_to_msecs(jiffies - stats.last_reset));
2280
2281 seq_printf(s, "irqs %d\n", stats.irq_count);
2282#define PIS(x) \
2283 seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
2284
2285 PIS(FRAMEDONE);
2286 PIS(VSYNC);
2287 PIS(EVSYNC_EVEN);
2288 PIS(EVSYNC_ODD);
2289 PIS(ACBIAS_COUNT_STAT);
2290 PIS(PROG_LINE_NUM);
2291 PIS(GFX_FIFO_UNDERFLOW);
2292 PIS(GFX_END_WIN);
2293 PIS(PAL_GAMMA_MASK);
2294 PIS(OCP_ERR);
2295 PIS(VID1_FIFO_UNDERFLOW);
2296 PIS(VID1_END_WIN);
2297 PIS(VID2_FIFO_UNDERFLOW);
2298 PIS(VID2_END_WIN);
2299 PIS(SYNC_LOST);
2300 PIS(SYNC_LOST_DIGIT);
2301 PIS(WAKEUP);
2302#undef PIS
2303}
2304#else
2305void dispc_dump_irqs(struct seq_file *s) { }
2306#endif
2307
2250void dispc_dump_regs(struct seq_file *s) 2308void dispc_dump_regs(struct seq_file *s)
2251{ 2309{
2252#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r)) 2310#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
@@ -2665,6 +2723,13 @@ void dispc_irq_handler(void)
2665 2723
2666 irqstatus = dispc_read_reg(DISPC_IRQSTATUS); 2724 irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
2667 2725
2726#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
2727 spin_lock(&dispc.irq_stats_lock);
2728 dispc.irq_stats.irq_count++;
2729 dss_collect_irq_stats(irqstatus, dispc.irq_stats.irqs);
2730 spin_unlock(&dispc.irq_stats_lock);
2731#endif
2732
2668#ifdef DEBUG 2733#ifdef DEBUG
2669 if (dss_debug) 2734 if (dss_debug)
2670 print_irq_status(irqstatus); 2735 print_irq_status(irqstatus);
@@ -3012,6 +3077,11 @@ int dispc_init(void)
3012 3077
3013 spin_lock_init(&dispc.irq_lock); 3078 spin_lock_init(&dispc.irq_lock);
3014 3079
3080#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
3081 spin_lock_init(&dispc.irq_stats_lock);
3082 dispc.irq_stats.last_reset = jiffies;
3083#endif
3084
3015 INIT_WORK(&dispc.error_work, dispc_error_worker); 3085 INIT_WORK(&dispc.error_work, dispc_error_worker);
3016 3086
3017 dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS); 3087 dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
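
One detail worth noting in dispc_dump_irqs(): the whole irq_stats struct is copied by value and then reset while irq_stats_lock is held, so the seq_printf() calls operate on a private snapshot outside the spinlock, and every read of the debugfs file reports, and restarts, its own measurement period.
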
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 5936487b5def..6122178f5f85 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -204,6 +204,14 @@ struct dsi_update_region {
204 struct omap_dss_device *device; 204 struct omap_dss_device *device;
205}; 205};
206 206
207struct dsi_irq_stats {
208 unsigned long last_reset;
209 unsigned irq_count;
210 unsigned dsi_irqs[32];
211 unsigned vc_irqs[4][32];
212 unsigned cio_irqs[32];
213};
214
207static struct 215static struct
208{ 216{
209 void __iomem *base; 217 void __iomem *base;
@@ -258,6 +266,11 @@ static struct
258#endif 266#endif
259 int debug_read; 267 int debug_read;
260 int debug_write; 268 int debug_write;
269
270#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
271 spinlock_t irq_stats_lock;
272 struct dsi_irq_stats irq_stats;
273#endif
261} dsi; 274} dsi;
262 275
263#ifdef DEBUG 276#ifdef DEBUG
@@ -528,6 +541,12 @@ void dsi_irq_handler(void)
528 541
529 irqstatus = dsi_read_reg(DSI_IRQSTATUS); 542 irqstatus = dsi_read_reg(DSI_IRQSTATUS);
530 543
544#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
545 spin_lock(&dsi.irq_stats_lock);
546 dsi.irq_stats.irq_count++;
547 dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs);
548#endif
549
531 if (irqstatus & DSI_IRQ_ERROR_MASK) { 550 if (irqstatus & DSI_IRQ_ERROR_MASK) {
532 DSSERR("DSI error, irqstatus %x\n", irqstatus); 551 DSSERR("DSI error, irqstatus %x\n", irqstatus);
533 print_irq_status(irqstatus); 552 print_irq_status(irqstatus);
@@ -549,6 +568,10 @@ void dsi_irq_handler(void)
549 568
550 vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i)); 569 vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
551 570
571#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
572 dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
573#endif
574
552 if (vcstatus & DSI_VC_IRQ_BTA) 575 if (vcstatus & DSI_VC_IRQ_BTA)
553 complete(&dsi.bta_completion); 576 complete(&dsi.bta_completion);
554 577
@@ -568,6 +591,10 @@ void dsi_irq_handler(void)
568 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { 591 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
569 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); 592 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
570 593
594#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
595 dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
596#endif
597
571 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus); 598 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
572 /* flush posted write */ 599 /* flush posted write */
573 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); 600 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
@@ -579,6 +606,10 @@ void dsi_irq_handler(void)
579 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); 606 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
580 /* flush posted write */ 607 /* flush posted write */
581 dsi_read_reg(DSI_IRQSTATUS); 608 dsi_read_reg(DSI_IRQSTATUS);
609
610#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
611 spin_unlock(&dsi.irq_stats_lock);
612#endif
582} 613}
583 614
584 615
@@ -797,12 +828,12 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
797 828
798 /* PLL_PWR_STATUS */ 829 /* PLL_PWR_STATUS */
799 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { 830 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
800 udelay(1); 831 if (++t > 1000) {
801 if (t++ > 1000) {
802 DSSERR("Failed to set DSI PLL power mode to %d\n", 832 DSSERR("Failed to set DSI PLL power mode to %d\n",
803 state); 833 state);
804 return -ENODEV; 834 return -ENODEV;
805 } 835 }
836 udelay(1);
806 } 837 }
807 838
808 return 0; 839 return 0;
@@ -1226,6 +1257,95 @@ void dsi_dump_clocks(struct seq_file *s)
1226 enable_clocks(0); 1257 enable_clocks(0);
1227} 1258}
1228 1259
1260#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1261void dsi_dump_irqs(struct seq_file *s)
1262{
1263 unsigned long flags;
1264 struct dsi_irq_stats stats;
1265
1266 spin_lock_irqsave(&dsi.irq_stats_lock, flags);
1267
1268 stats = dsi.irq_stats;
1269 memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats));
1270 dsi.irq_stats.last_reset = jiffies;
1271
1272 spin_unlock_irqrestore(&dsi.irq_stats_lock, flags);
1273
1274 seq_printf(s, "period %u ms\n",
1275 jiffies_to_msecs(jiffies - stats.last_reset));
1276
1277 seq_printf(s, "irqs %d\n", stats.irq_count);
1278#define PIS(x) \
1279 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1280
1281 seq_printf(s, "-- DSI interrupts --\n");
1282 PIS(VC0);
1283 PIS(VC1);
1284 PIS(VC2);
1285 PIS(VC3);
1286 PIS(WAKEUP);
1287 PIS(RESYNC);
1288 PIS(PLL_LOCK);
1289 PIS(PLL_UNLOCK);
1290 PIS(PLL_RECALL);
1291 PIS(COMPLEXIO_ERR);
1292 PIS(HS_TX_TIMEOUT);
1293 PIS(LP_RX_TIMEOUT);
1294 PIS(TE_TRIGGER);
1295 PIS(ACK_TRIGGER);
1296 PIS(SYNC_LOST);
1297 PIS(LDO_POWER_GOOD);
1298 PIS(TA_TIMEOUT);
1299#undef PIS
1300
1301#define PIS(x) \
1302 seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
1303 stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
1304 stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
1305 stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
1306 stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
1307
1308 seq_printf(s, "-- VC interrupts --\n");
1309 PIS(CS);
1310 PIS(ECC_CORR);
1311 PIS(PACKET_SENT);
1312 PIS(FIFO_TX_OVF);
1313 PIS(FIFO_RX_OVF);
1314 PIS(BTA);
1315 PIS(ECC_NO_CORR);
1316 PIS(FIFO_TX_UDF);
1317 PIS(PP_BUSY_CHANGE);
1318#undef PIS
1319
1320#define PIS(x) \
1321 seq_printf(s, "%-20s %10d\n", #x, \
1322 stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
1323
1324 seq_printf(s, "-- CIO interrupts --\n");
1325 PIS(ERRSYNCESC1);
1326 PIS(ERRSYNCESC2);
1327 PIS(ERRSYNCESC3);
1328 PIS(ERRESC1);
1329 PIS(ERRESC2);
1330 PIS(ERRESC3);
1331 PIS(ERRCONTROL1);
1332 PIS(ERRCONTROL2);
1333 PIS(ERRCONTROL3);
1334 PIS(STATEULPS1);
1335 PIS(STATEULPS2);
1336 PIS(STATEULPS3);
1337 PIS(ERRCONTENTIONLP0_1);
1338 PIS(ERRCONTENTIONLP1_1);
1339 PIS(ERRCONTENTIONLP0_2);
1340 PIS(ERRCONTENTIONLP1_2);
1341 PIS(ERRCONTENTIONLP0_3);
1342 PIS(ERRCONTENTIONLP1_3);
1343 PIS(ULPSACTIVENOT_ALL0);
1344 PIS(ULPSACTIVENOT_ALL1);
1345#undef PIS
1346}
1347#endif
1348
1229void dsi_dump_regs(struct seq_file *s) 1349void dsi_dump_regs(struct seq_file *s)
1230{ 1350{
1231#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) 1351#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
@@ -1321,12 +1441,12 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state)
1321 1441
1322 /* PWR_STATUS */ 1442 /* PWR_STATUS */
1323 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { 1443 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
1324 udelay(1); 1444 if (++t > 1000) {
1325 if (t++ > 1000) {
1326 DSSERR("failed to set complexio power state to " 1445 DSSERR("failed to set complexio power state to "
1327 "%d\n", state); 1446 "%d\n", state);
1328 return -ENODEV; 1447 return -ENODEV;
1329 } 1448 }
1449 udelay(1);
1330 } 1450 }
1331 1451
1332 return 0; 1452 return 0;
@@ -1526,10 +1646,10 @@ static void dsi_complexio_uninit(void)
1526 1646
1527static int _dsi_wait_reset(void) 1647static int _dsi_wait_reset(void)
1528{ 1648{
1529 int i = 0; 1649 int t = 0;
1530 1650
1531 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { 1651 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
1532 if (i++ > 5) { 1652 if (++t > 5) {
1533 DSSERR("soft reset failed\n"); 1653 DSSERR("soft reset failed\n");
1534 return -ENODEV; 1654 return -ENODEV;
1535 } 1655 }
@@ -1999,7 +2119,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
1999 return -EINVAL; 2119 return -EINVAL;
2000 } 2120 }
2001 2121
2002 data_id = data_type | channel << 6; 2122 data_id = data_type | dsi.vc[channel].dest_per << 6;
2003 2123
2004 r = (data_id << 0) | (data << 8) | (ecc << 24); 2124 r = (data_id << 0) | (data << 8) | (ecc << 24);
2005 2125
@@ -2011,7 +2131,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
2011int dsi_vc_send_null(int channel) 2131int dsi_vc_send_null(int channel)
2012{ 2132{
2013 u8 nullpkg[] = {0, 0, 0, 0}; 2133 u8 nullpkg[] = {0, 0, 0, 0};
2014 return dsi_vc_send_long(0, DSI_DT_NULL_PACKET, nullpkg, 4, 0); 2134 return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
2015} 2135}
2016EXPORT_SYMBOL(dsi_vc_send_null); 2136EXPORT_SYMBOL(dsi_vc_send_null);
2017 2137
@@ -2058,7 +2178,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2058 int r; 2178 int r;
2059 2179
2060 if (dsi.debug_read) 2180 if (dsi.debug_read)
2061 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd); 2181 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
2062 2182
2063 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); 2183 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
2064 if (r) 2184 if (r)
@@ -2586,7 +2706,6 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2586 /* using fifo not empty */ 2706 /* using fifo not empty */
2587 /* TX_FIFO_NOT_EMPTY */ 2707 /* TX_FIFO_NOT_EMPTY */
2588 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { 2708 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
2589 udelay(1);
2590 fifo_stalls++; 2709 fifo_stalls++;
2591 if (fifo_stalls > 0xfffff) { 2710 if (fifo_stalls > 0xfffff) {
2592 DSSERR("fifo stalls overflow, pixels left %d\n", 2711 DSSERR("fifo stalls overflow, pixels left %d\n",
@@ -2594,6 +2713,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2594 dsi_if_enable(0); 2713 dsi_if_enable(0);
2595 return -EIO; 2714 return -EIO;
2596 } 2715 }
2716 udelay(1);
2597 } 2717 }
2598#elif 1 2718#elif 1
2599 /* using fifo emptiness */ 2719 /* using fifo emptiness */
@@ -2812,11 +2932,15 @@ static int dsi_set_update_mode(struct omap_dss_device *dssdev,
2812 2932
2813static int dsi_set_te(struct omap_dss_device *dssdev, bool enable) 2933static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
2814{ 2934{
2815 int r; 2935 int r = 0;
2816 r = dssdev->driver->enable_te(dssdev, enable); 2936
2817 /* XXX for some reason, DSI TE breaks if we don't wait here. 2937 if (dssdev->driver->enable_te) {
2818 * Panel bug? Needs more studying */ 2938 r = dssdev->driver->enable_te(dssdev, enable);
2819 msleep(100); 2939 /* XXX for some reason, DSI TE breaks if we don't wait here.
2940 * Panel bug? Needs more studying */
2941 msleep(100);
2942 }
2943
2820 return r; 2944 return r;
2821} 2945}
2822 2946
@@ -3637,6 +3761,11 @@ int dsi_init(struct platform_device *pdev)
3637 spin_lock_init(&dsi.errors_lock); 3761 spin_lock_init(&dsi.errors_lock);
3638 dsi.errors = 0; 3762 dsi.errors = 0;
3639 3763
3764#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
3765 spin_lock_init(&dsi.irq_stats_lock);
3766 dsi.irq_stats.last_reset = jiffies;
3767#endif
3768
3640 init_completion(&dsi.bta_completion); 3769 init_completion(&dsi.bta_completion);
3641 init_completion(&dsi.update_completion); 3770 init_completion(&dsi.update_completion);
3642 3771
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 9b05ee65a15d..0a26b7d84d41 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -467,14 +467,14 @@ static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
467 467
468static int _omap_dss_wait_reset(void) 468static int _omap_dss_wait_reset(void)
469{ 469{
470 unsigned timeout = 1000; 470 int t = 0;
471 471
472 while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) { 472 while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
473 udelay(1); 473 if (++t > 1000) {
474 if (!--timeout) {
475 DSSERR("soft reset failed\n"); 474 DSSERR("soft reset failed\n");
476 return -ENODEV; 475 return -ENODEV;
477 } 476 }
477 udelay(1);
478 } 478 }
479 479
480 return 0; 480 return 0;
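
The same poll-loop shape now appears in dsi_pll_power(), dsi_complexio_power(), _dsi_wait_reset() and _omap_dss_wait_reset(): the counter is tested before sleeping rather than after, giving one consistent timeout idiom across the DSS code. A minimal sketch of that idiom (hw_ready() is a placeholder for the REG_GET/FLD_GET condition):

	int t = 0;

	while (!hw_ready()) {
		if (++t > 1000) {
			DSSERR("timed out waiting for hardware\n");
			return -ENODEV;
		}
		udelay(1);
	}
	return 0;
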
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8da5ac42151b..2bcb1245d6c2 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -240,6 +240,7 @@ int dsi_init(struct platform_device *pdev);
240void dsi_exit(void); 240void dsi_exit(void);
241 241
242void dsi_dump_clocks(struct seq_file *s); 242void dsi_dump_clocks(struct seq_file *s);
243void dsi_dump_irqs(struct seq_file *s);
243void dsi_dump_regs(struct seq_file *s); 244void dsi_dump_regs(struct seq_file *s);
244 245
245void dsi_save_context(void); 246void dsi_save_context(void);
@@ -268,6 +269,7 @@ int dpi_init_display(struct omap_dss_device *dssdev);
268int dispc_init(void); 269int dispc_init(void);
269void dispc_exit(void); 270void dispc_exit(void);
270void dispc_dump_clocks(struct seq_file *s); 271void dispc_dump_clocks(struct seq_file *s);
272void dispc_dump_irqs(struct seq_file *s);
271void dispc_dump_regs(struct seq_file *s); 273void dispc_dump_regs(struct seq_file *s);
272void dispc_irq_handler(void); 274void dispc_irq_handler(void);
273void dispc_fake_vsync_irq(void); 275void dispc_fake_vsync_irq(void);
@@ -367,4 +369,16 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
367unsigned long rfbi_get_max_tx_rate(void); 369unsigned long rfbi_get_max_tx_rate(void);
368int rfbi_init_display(struct omap_dss_device *display); 370int rfbi_init_display(struct omap_dss_device *display);
369 371
372
373#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
374static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
375{
376 int b;
377 for (b = 0; b < 32; ++b) {
378 if (irqstatus & (1 << b))
379 irq_arr[b]++;
380 }
381}
382#endif
383
370#endif 384#endif
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index d0b3006ad8a5..b936495c065d 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -120,7 +120,7 @@ static struct {
120 120
121 struct omap_dss_device *dssdev[2]; 121 struct omap_dss_device *dssdev[2];
122 122
123 struct kfifo *cmd_fifo; 123 struct kfifo cmd_fifo;
124 spinlock_t cmd_lock; 124 spinlock_t cmd_lock;
125 struct completion cmd_done; 125 struct completion cmd_done;
126 atomic_t cmd_fifo_full; 126 atomic_t cmd_fifo_full;
@@ -1011,20 +1011,20 @@ static void process_cmd_fifo(void)
1011 return; 1011 return;
1012 1012
1013 while (true) { 1013 while (true) {
1014 spin_lock_irqsave(rfbi.cmd_fifo->lock, flags); 1014 spin_lock_irqsave(&rfbi.cmd_lock, flags);
1015 1015
1016 len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p, 1016 len = kfifo_out(&rfbi.cmd_fifo, (unsigned char *)&p,
1017 sizeof(struct update_param)); 1017 sizeof(struct update_param));
1018 if (len == 0) { 1018 if (len == 0) {
1019 DSSDBG("nothing more in fifo\n"); 1019 DSSDBG("nothing more in fifo\n");
1020 atomic_set(&rfbi.cmd_pending, 0); 1020 atomic_set(&rfbi.cmd_pending, 0);
1021 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); 1021 spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
1022 break; 1022 break;
1023 } 1023 }
1024 1024
1025 /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/ 1025 /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/
1026 1026
1027 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); 1027 spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
1028 1028
1029 BUG_ON(len != sizeof(struct update_param)); 1029 BUG_ON(len != sizeof(struct update_param));
1030 BUG_ON(p.rfbi_module > 1); 1030 BUG_ON(p.rfbi_module > 1);
@@ -1052,25 +1052,25 @@ static void rfbi_push_cmd(struct update_param *p)
1052 unsigned long flags; 1052 unsigned long flags;
1053 int available; 1053 int available;
1054 1054
1055 spin_lock_irqsave(rfbi.cmd_fifo->lock, flags); 1055 spin_lock_irqsave(&rfbi.cmd_lock, flags);
1056 available = RFBI_CMD_FIFO_LEN_BYTES - 1056 available = RFBI_CMD_FIFO_LEN_BYTES -
1057 __kfifo_len(rfbi.cmd_fifo); 1057 kfifo_len(&rfbi.cmd_fifo);
1058 1058
1059/* DSSDBG("%d bytes left in fifo\n", available); */ 1059/* DSSDBG("%d bytes left in fifo\n", available); */
1060 if (available < sizeof(struct update_param)) { 1060 if (available < sizeof(struct update_param)) {
1061 DSSDBG("Going to wait because FIFO FULL..\n"); 1061 DSSDBG("Going to wait because FIFO FULL..\n");
1062 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); 1062 spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
1063 atomic_inc(&rfbi.cmd_fifo_full); 1063 atomic_inc(&rfbi.cmd_fifo_full);
1064 wait_for_completion(&rfbi.cmd_done); 1064 wait_for_completion(&rfbi.cmd_done);
1065 /*DSSDBG("Woke up because fifo not full anymore\n");*/ 1065 /*DSSDBG("Woke up because fifo not full anymore\n");*/
1066 continue; 1066 continue;
1067 } 1067 }
1068 1068
1069 ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p, 1069 ret = kfifo_in(&rfbi.cmd_fifo, (unsigned char *)p,
1070 sizeof(struct update_param)); 1070 sizeof(struct update_param));
1071/* DSSDBG("pushed %d bytes\n", ret);*/ 1071/* DSSDBG("pushed %d bytes\n", ret);*/
1072 1072
1073 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); 1073 spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
1074 1074
1075 BUG_ON(ret != sizeof(struct update_param)); 1075 BUG_ON(ret != sizeof(struct update_param));
1076 1076
@@ -1155,12 +1155,12 @@ int rfbi_init(void)
1155{ 1155{
1156 u32 rev; 1156 u32 rev;
1157 u32 l; 1157 u32 l;
1158 int r;
1158 1159
1159 spin_lock_init(&rfbi.cmd_lock); 1160 spin_lock_init(&rfbi.cmd_lock);
1160 rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL, 1161 r = kfifo_alloc(&rfbi.cmd_fifo, RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL);
1161 &rfbi.cmd_lock); 1162 if (r)
1162 if (IS_ERR(rfbi.cmd_fifo)) 1163 return r;
1163 return -ENOMEM;
1164 1164
1165 init_completion(&rfbi.cmd_done); 1165 init_completion(&rfbi.cmd_done);
1166 atomic_set(&rfbi.cmd_fifo_full, 0); 1166 atomic_set(&rfbi.cmd_fifo_full, 0);
@@ -1196,7 +1196,7 @@ void rfbi_exit(void)
1196{ 1196{
1197 DSSDBG("rfbi_exit\n"); 1197 DSSDBG("rfbi_exit\n");
1198 1198
1199 kfifo_free(rfbi.cmd_fifo); 1199 kfifo_free(&rfbi.cmd_fifo);
1200 1200
1201 iounmap(rfbi.base); 1201 iounmap(rfbi.base);
1202} 1202}
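
This hunk is the migration from the old pointer-based kfifo (allocated by kfifo_alloc() with an embedded lock, accessed through __kfifo_put()/__kfifo_get()) to the 2.6.33-style in-place API, where the struct kfifo is embedded in the driver and locking is the caller's job. A compressed sketch of the new-style usage; everything except the kfifo and spinlock calls is a placeholder name:

	#include <linux/kfifo.h>
	#include <linux/spinlock.h>

	static struct kfifo fifo;
	static DEFINE_SPINLOCK(fifo_lock);   /* caller-managed, no longer inside the kfifo */

	static int fifo_init(void)
	{
		return kfifo_alloc(&fifo, 1024, GFP_KERNEL);   /* returns 0 on success */
	}

	static void fifo_push(const void *rec, unsigned int len)
	{
		unsigned long flags;

		spin_lock_irqsave(&fifo_lock, flags);
		kfifo_in(&fifo, rec, len);
		spin_unlock_irqrestore(&fifo_lock, flags);
	}
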
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index ef299839858a..d17caef6915a 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1311,6 +1311,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
1311 if (rg->vrfb.vaddr[0]) { 1311 if (rg->vrfb.vaddr[0]) {
1312 iounmap(rg->vrfb.vaddr[0]); 1312 iounmap(rg->vrfb.vaddr[0]);
1313 omap_vrfb_release_ctx(&rg->vrfb); 1313 omap_vrfb_release_ctx(&rg->vrfb);
1314 rg->vrfb.vaddr[0] = NULL;
1314 } 1315 }
1315 } 1316 }
1316 1317
@@ -2114,6 +2115,11 @@ static int omapfb_probe(struct platform_device *pdev)
2114 dssdev = NULL; 2115 dssdev = NULL;
2115 for_each_dss_dev(dssdev) { 2116 for_each_dss_dev(dssdev) {
2116 omap_dss_get_device(dssdev); 2117 omap_dss_get_device(dssdev);
2118 if (!dssdev->driver) {
2119 dev_err(&pdev->dev, "no driver for display\n");
2120 r = -EINVAL;
2121 goto cleanup;
2122 }
2117 fbdev->displays[fbdev->num_displays++] = dssdev; 2123 fbdev->displays[fbdev->num_displays++] = dssdev;
2118 } 2124 }
2119 2125
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 415858b421b3..825b665245bb 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1221,9 +1221,9 @@ static void setup_smart_timing(struct pxafb_info *fbi,
1221static int pxafb_smart_thread(void *arg) 1221static int pxafb_smart_thread(void *arg)
1222{ 1222{
1223 struct pxafb_info *fbi = arg; 1223 struct pxafb_info *fbi = arg;
1224 struct pxafb_mach_info *inf; 1224 struct pxafb_mach_info *inf = fbi->dev->platform_data;
1225 1225
1226 if (!fbi || !fbi->dev->platform_data->smart_update) { 1226 if (!inf->smart_update) {
1227 pr_err("%s: not properly initialized, thread terminated\n", 1227 pr_err("%s: not properly initialized, thread terminated\n",
1228 __func__); 1228 __func__);
1229 return -EINVAL; 1229 return -EINVAL;
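
The pxafb change is more than style: struct device's platform_data field is a void *, so the old fbi->dev->platform_data->smart_update expression cannot be dereferenced as written (the smart-panel thread is only built with CONFIG_FB_PXA_SMARTPANEL, which is presumably how it bit-rotted). Assigning platform_data to a typed pxafb_mach_info pointer once, as the new code does, is the fix; the dropped !fbi test could go because the thread is only ever started with a valid fbi argument.
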
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index adf9632c6b1f..53cb722c45a0 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -211,21 +211,23 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
211 211
212/** 212/**
213 * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock. 213 * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
214 * @id: window id.
215 * @sfb: The hardware state. 214 * @sfb: The hardware state.
216 * @pixclock: The pixel clock wanted, in picoseconds. 215 * @pixclock: The pixel clock wanted, in picoseconds.
217 * 216 *
218 * Given the specified pixel clock, work out the necessary divider to get 217 * Given the specified pixel clock, work out the necessary divider to get
219 * close to the output frequency. 218 * close to the output frequency.
220 */ 219 */
221static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk) 220static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
222{ 221{
223 struct s3c_fb_pd_win *win = sfb->pdata->win[id];
224 unsigned long clk = clk_get_rate(sfb->bus_clk); 222 unsigned long clk = clk_get_rate(sfb->bus_clk);
223 unsigned long long tmp;
225 unsigned int result; 224 unsigned int result;
226 225
227 pixclk *= win->win_mode.refresh; 226 tmp = (unsigned long long)clk;
228 result = clk / pixclk; 227 tmp *= pixclk;
228
229 do_div(tmp, 1000000000UL);
230 result = (unsigned int)tmp / 1000;
229 231
230 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", 232 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
231 pixclk, clk, result, clk / result); 233 pixclk, clk, result, clk / result);
@@ -301,7 +303,7 @@ static int s3c_fb_set_par(struct fb_info *info)
301 /* use window 0 as the basis for the lcd output timings */ 303 /* use window 0 as the basis for the lcd output timings */
302 304
303 if (win_no == 0) { 305 if (win_no == 0) {
304 clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock); 306 clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
305 307
306 data = sfb->pdata->vidcon0; 308 data = sfb->pdata->vidcon0;
307 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); 309 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
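
The reworked s3c_fb_calc_pixclk() treats pixclock as a period in picoseconds and computes result = clk[Hz] * pixclock[ps] / 10^12, i.e. the bus clock divided by the requested pixel rate; the 64-bit tmp plus do_div() is needed because the intermediate product overflows 32 bits. A worked example with assumed numbers:

	/* Assumed figures, for illustration only:
	 *   bus clock  clk  = 133,000,000 Hz
	 *   var->pixclock   =      74,074 ps   (~13.5 MHz pixel clock)
	 *
	 *   tmp    = 133,000,000 * 74,074      = 9,851,842,000,000
	 *   do_div(tmp, 1,000,000,000)  -> tmp =             9,851
	 *   result = 9,851 / 1000              =                 9
	 *
	 * which matches clk / pixel_rate = 133 MHz / 13.5 MHz = 9.85, truncated to 9.
	 */
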
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index 9d4f3a49ba4a..d5077dfa9e00 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -137,7 +137,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
137 tmp, dst_pitch); 137 tmp, dst_pitch);
138 return -EINVAL; 138 return -EINVAL;
139 } 139 }
140 tmp = (tmp >> 3) | (dst_pitch << (16 - 3)); 140 tmp = VIA_PITCH_ENABLE | (tmp >> 3) | (dst_pitch << (16 - 3));
141 writel(tmp, engine + 0x38); 141 writel(tmp, engine + 0x38);
142 142
143 if (op == VIA_BITBLT_FILL) 143 if (op == VIA_BITBLT_FILL)
@@ -352,6 +352,9 @@ int viafb_init_engine(struct fb_info *info)
352 viapar->shared->vq_vram_addr = viapar->fbmem_free; 352 viapar->shared->vq_vram_addr = viapar->fbmem_free;
353 viapar->fbmem_used += VQ_SIZE; 353 viapar->fbmem_used += VQ_SIZE;
354 354
355 /* Init 2D engine reg to reset 2D engine */
356 writel(0x0, engine + VIA_REG_KEYCONTROL);
357
355 /* Init AGP and VQ regs */ 358 /* Init AGP and VQ regs */
356 switch (chip_name) { 359 switch (chip_name) {
357 case UNICHROME_K8M890: 360 case UNICHROME_K8M890:
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index d8df17a7d5fc..3028e7ddc3b5 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -177,16 +177,15 @@ static int viafb_set_par(struct fb_info *info)
177 } 177 }
178 178
179 if (vmode_index != VIA_RES_INVALID) { 179 if (vmode_index != VIA_RES_INVALID) {
180 viafb_setmode(vmode_index, info->var.xres, info->var.yres,
181 info->var.bits_per_pixel, vmode_index1,
182 viafb_second_xres, viafb_second_yres, viafb_bpp1);
183
184 viafb_update_fix(info); 180 viafb_update_fix(info);
185 viafb_bpp = info->var.bits_per_pixel; 181 viafb_bpp = info->var.bits_per_pixel;
186 if (info->var.accel_flags & FB_ACCELF_TEXT) 182 if (info->var.accel_flags & FB_ACCELF_TEXT)
187 info->flags &= ~FBINFO_HWACCEL_DISABLED; 183 info->flags &= ~FBINFO_HWACCEL_DISABLED;
188 else 184 else
189 info->flags |= FBINFO_HWACCEL_DISABLED; 185 info->flags |= FBINFO_HWACCEL_DISABLED;
186 viafb_setmode(vmode_index, info->var.xres, info->var.yres,
187 info->var.bits_per_pixel, vmode_index1,
188 viafb_second_xres, viafb_second_yres, viafb_bpp1);
190 } 189 }
191 190
192 return 0; 191 return 0;
@@ -872,7 +871,9 @@ static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
872 if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo) 871 if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo)
873 return -ENODEV; 872 return -ENODEV;
874 873
875 if (chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) 874 /* LCD ouput does not support hw cursors (at least on VN896) */
 875	if (chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) 874	/* LCD output does not support hw cursors (at least on VN896) */
875 if ((chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) ||
876 viafb_LCD_ON)
876 return -ENODEV; 877 return -ENODEV;
877 878
878 viafb_show_hw_cursor(info, HW_Cursor_OFF); 879 viafb_show_hw_cursor(info, HW_Cursor_OFF);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9dd588042880..505be88c82ae 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -266,7 +266,7 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
266 266
267static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST }; 267static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
268 268
269static struct virtio_driver virtio_balloon = { 269static struct virtio_driver virtio_balloon_driver = {
270 .feature_table = features, 270 .feature_table = features,
271 .feature_table_size = ARRAY_SIZE(features), 271 .feature_table_size = ARRAY_SIZE(features),
272 .driver.name = KBUILD_MODNAME, 272 .driver.name = KBUILD_MODNAME,
@@ -279,12 +279,12 @@ static struct virtio_driver virtio_balloon = {
279 279
280static int __init init(void) 280static int __init init(void)
281{ 281{
282 return register_virtio_driver(&virtio_balloon); 282 return register_virtio_driver(&virtio_balloon_driver);
283} 283}
284 284
285static void __exit fini(void) 285static void __exit fini(void)
286{ 286{
287 unregister_virtio_driver(&virtio_balloon); 287 unregister_virtio_driver(&virtio_balloon_driver);
288} 288}
289module_init(init); 289module_init(init);
290module_exit(fini); 290module_exit(fini);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 088f32f29a6e..050ee147592f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -396,8 +396,8 @@ config SBC_FITPC2_WATCHDOG
396 tristate "Compulab SBC-FITPC2 watchdog" 396 tristate "Compulab SBC-FITPC2 watchdog"
397 depends on X86 397 depends on X86
398 ---help--- 398 ---help---
399 This is the driver for the built-in watchdog timer on the fit-PC2 399 This is the driver for the built-in watchdog timer on the fit-PC2,
400 Single-board computer made by Compulab. 400 fit-PC2i, CM-iAM single-board computers made by Compulab.
401 401
402 It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux. 402 It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux.
403 When "Watchdog Timer Value" enabled one can set 31-255 s operational range. 403 When "Watchdog Timer Value" enabled one can set 31-255 s operational range.
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index c8a3bec26830..4bdb7f1a9077 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -29,8 +29,9 @@
29 * document number 313056-003, 313057-017: 82801H (ICH8) 29 * document number 313056-003, 313057-017: 82801H (ICH8)
30 * document number 316972-004, 316973-012: 82801I (ICH9) 30 * document number 316972-004, 316973-012: 82801I (ICH9)
31 * document number 319973-002, 319974-002: 82801J (ICH10) 31 * document number 319973-002, 319974-002: 82801J (ICH10)
32 * document number 322169-001, 322170-001: 5 Series, 3400 Series (PCH) 32 * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
33 * document number 320066-003, 320257-008: EP80597 (IICH) 33 * document number 320066-003, 320257-008: EP80597 (IICH)
34 * document number TBD : Cougar Point (CPT)
34 */ 35 */
35 36
36/* 37/*
@@ -100,8 +101,22 @@ enum iTCO_chipsets {
100 TCO_ICH10DO, /* ICH10DO */ 101 TCO_ICH10DO, /* ICH10DO */
101 TCO_PCH, /* PCH Desktop Full Featured */ 102 TCO_PCH, /* PCH Desktop Full Featured */
102 TCO_PCHM, /* PCH Mobile Full Featured */ 103 TCO_PCHM, /* PCH Mobile Full Featured */
104 TCO_P55, /* P55 */
105 TCO_PM55, /* PM55 */
106 TCO_H55, /* H55 */
107 TCO_QM57, /* QM57 */
108 TCO_H57, /* H57 */
109 TCO_HM55, /* HM55 */
110 TCO_Q57, /* Q57 */
111 TCO_HM57, /* HM57 */
103 TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */ 112 TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
113 TCO_QS57, /* QS57 */
114 TCO_3400, /* 3400 */
115 TCO_3420, /* 3420 */
116 TCO_3450, /* 3450 */
104 TCO_EP80579, /* EP80579 */ 117 TCO_EP80579, /* EP80579 */
118 TCO_CPTD, /* CPT Desktop */
119 TCO_CPTM, /* CPT Mobile */
105}; 120};
106 121
107static struct { 122static struct {
@@ -144,8 +159,22 @@ static struct {
144 {"ICH10DO", 2}, 159 {"ICH10DO", 2},
145 {"PCH Desktop Full Featured", 2}, 160 {"PCH Desktop Full Featured", 2},
146 {"PCH Mobile Full Featured", 2}, 161 {"PCH Mobile Full Featured", 2},
162 {"P55", 2},
163 {"PM55", 2},
164 {"H55", 2},
165 {"QM57", 2},
166 {"H57", 2},
167 {"HM55", 2},
168 {"Q57", 2},
169 {"HM57", 2},
147 {"PCH Mobile SFF Full Featured", 2}, 170 {"PCH Mobile SFF Full Featured", 2},
171 {"QS57", 2},
172 {"3400", 2},
173 {"3420", 2},
174 {"3450", 2},
148 {"EP80579", 2}, 175 {"EP80579", 2},
176 {"CPT Desktop", 2},
177 {"CPT Mobile", 2},
149 {NULL, 0} 178 {NULL, 0}
150}; 179};
151 180
@@ -216,8 +245,22 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
216 { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)}, 245 { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
217 { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)}, 246 { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)},
218 { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)}, 247 { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)},
248 { ITCO_PCI_DEVICE(0x3b02, TCO_P55)},
249 { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)},
250 { ITCO_PCI_DEVICE(0x3b06, TCO_H55)},
251 { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)},
252 { ITCO_PCI_DEVICE(0x3b08, TCO_H57)},
253 { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)},
254 { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)},
255 { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)},
219 { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)}, 256 { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)},
257 { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)},
258 { ITCO_PCI_DEVICE(0x3b12, TCO_3400)},
259 { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
260 { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
220 { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, 261 { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
262 { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
263 { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
221 { 0, }, /* End of list */ 264 { 0, }, /* End of list */
222}; 265};
223MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); 266MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
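
Each chipset added to iTCO_wdt touches three tables that have to stay in sync: the iTCO_chipsets enum, the name/TCO-version array, and the PCI device ID table. A hypothetical new entry (device ID and name invented purely for illustration) would land in all three:

	/* 1. enum iTCO_chipsets */
	TCO_NEWCHIP,		/* NewChip (hypothetical) */

	/* 2. name / iTCO_version table -- all the parts above use TCO version 2 */
	{"NewChip", 2},

	/* 3. PCI device table */
	{ ITCO_PCI_DEVICE(0x1234, TCO_NEWCHIP)},
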
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index 4f4b35a20d84..3c79dc587958 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/timer.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/fs.h> 24#include <linux/fs.h>
24#include <linux/miscdevice.h> 25#include <linux/miscdevice.h>
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 91430a89107c..e6763d2a567b 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -46,9 +46,9 @@ static DEFINE_SPINLOCK(wdt_lock);
46static void wdt_send_data(unsigned char command, unsigned char data) 46static void wdt_send_data(unsigned char command, unsigned char data)
47{ 47{
48 outb(command, COMMAND_PORT); 48 outb(command, COMMAND_PORT);
49 mdelay(100); 49 msleep(100);
50 outb(data, DATA_PORT); 50 outb(data, DATA_PORT);
51 mdelay(200); 51 msleep(200);
52} 52}
53 53
54static void wdt_enable(void) 54static void wdt_enable(void)
@@ -202,11 +202,10 @@ static int __init fitpc2_wdt_init(void)
202{ 202{
203 int err; 203 int err;
204 204
205 if (strcmp("SBC-FITPC2", dmi_get_system_info(DMI_BOARD_NAME))) { 205 if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2"))
206 pr_info("board name is: %s. Should be SBC-FITPC2\n",
207 dmi_get_system_info(DMI_BOARD_NAME));
208 return -ENODEV; 206 return -ENODEV;
209 } 207
208 pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME));
210 209
211 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { 210 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
212 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); 211 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c4997930afc7..5d42d55e299b 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -102,15 +102,15 @@ static void do_suspend(void)
102 goto out_thaw; 102 goto out_thaw;
103 } 103 }
104 104
105 printk(KERN_DEBUG "suspending xenstore...\n");
106 xs_suspend();
107
105 err = dpm_suspend_noirq(PMSG_SUSPEND); 108 err = dpm_suspend_noirq(PMSG_SUSPEND);
106 if (err) { 109 if (err) {
107 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); 110 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
108 goto out_resume; 111 goto out_resume;
109 } 112 }
110 113
111 printk(KERN_DEBUG "suspending xenstore...\n");
112 xs_suspend();
113
114 err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); 114 err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
115 115
116 dpm_resume_noirq(PMSG_RESUME); 116 dpm_resume_noirq(PMSG_RESUME);
@@ -120,13 +120,13 @@ static void do_suspend(void)
120 cancelled = 1; 120 cancelled = 1;
121 } 121 }
122 122
123out_resume:
123 if (!cancelled) { 124 if (!cancelled) {
124 xen_arch_resume(); 125 xen_arch_resume();
125 xs_resume(); 126 xs_resume();
126 } else 127 } else
127 xs_suspend_cancel(); 128 xs_suspend_cancel();
128 129
129out_resume:
130 dpm_resume_end(PMSG_RESUME); 130 dpm_resume_end(PMSG_RESUME);
131 131
132 /* Make sure timer events get retriggered on all CPUs */ 132 /* Make sure timer events get retriggered on all CPUs */