author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:54 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:54 -0400
commit		9134d02bc0af4a8747d448d1f811ec5f8eb96df6 (patch)
tree		704c3e5dcc10f360815c4868a74711f82fb62e27 /drivers
parent		bbb20089a3275a19e475dbc21320c3742e3ca423 (diff)
parent		80ffb3cceaefa405f2ecd46d66500ed8d53efe74 (diff)
Merge commit 'md/for-linus' into async-tx-next
Conflicts: drivers/md/raid5.c
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_memhotplug.c34
-rw-r--r--drivers/acpi/acpica/acobject.h1
-rw-r--r--drivers/acpi/acpica/dsopcode.c24
-rw-r--r--drivers/acpi/acpica/exfldio.c6
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/acpi/system.c2
-rw-r--r--drivers/amba/bus.c12
-rw-r--r--drivers/ata/ahci.c5
-rw-r--r--drivers/ata/ata_piix.c3
-rw-r--r--drivers/ata/libata-core.c50
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/pata_at91.c70
-rw-r--r--drivers/ata/pata_octeon_cf.c3
-rw-r--r--drivers/ata/pata_pcmcia.c1
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/base/devres.c3
-rw-r--r--drivers/base/firmware_class.c13
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/base/sys.c2
-rw-r--r--drivers/block/DAC960.c1
-rw-r--r--drivers/block/Kconfig16
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/cciss.c16
-rw-r--r--drivers/block/cciss_cmd.h1
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/block/loop.c1
-rw-r--r--drivers/block/mg_disk.c101
-rw-r--r--drivers/block/osdblk.c701
-rw-r--r--drivers/block/pktcdvd.c10
-rw-r--r--drivers/block/virtio_blk.c12
-rw-r--r--drivers/block/xsysace.c7
-rw-r--r--drivers/block/z2ram.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c1
-rw-r--r--drivers/char/Kconfig4
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/parisc-agp.c2
-rw-r--r--drivers/char/amiserial.c1
-rw-r--r--drivers/char/bsr.c42
-rw-r--r--drivers/char/cyclades.c1
-rw-r--r--drivers/char/epca.c1
-rw-r--r--drivers/char/hvc_console.c2
-rw-r--r--drivers/char/hw_random/intel-rng.c9
-rw-r--r--drivers/char/isicom.c17
-rw-r--r--drivers/char/istallion.c1
-rw-r--r--drivers/char/moxa.c1
-rw-r--r--drivers/char/mxser.c1
-rw-r--r--drivers/char/n_hdlc.c1
-rw-r--r--drivers/char/n_r3964.c1
-rw-r--r--drivers/char/n_tty.c4
-rw-r--r--drivers/char/nozomi.c16
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c4
-rw-r--r--drivers/char/pty.c155
-rw-r--r--drivers/char/rio/rio_linux.c1
-rw-r--r--drivers/char/riscom8.c1
-rw-r--r--drivers/char/rocket.c1
-rw-r--r--drivers/char/serial167.c1
-rw-r--r--drivers/char/specialix.c25
-rw-r--r--drivers/char/sx.c1
-rw-r--r--drivers/char/synclink.c1
-rw-r--r--drivers/char/synclink_gt.c1
-rw-r--r--drivers/char/synclinkmp.c1
-rw-r--r--drivers/char/sysrq.c8
-rw-r--r--drivers/char/tb0219.c4
-rw-r--r--drivers/char/tpm/tpm.c1
-rw-r--r--drivers/char/tty_buffer.c13
-rw-r--r--drivers/char/tty_ioctl.c1
-rw-r--r--drivers/char/tty_ldisc.c189
-rw-r--r--drivers/char/tty_port.c2
-rw-r--r--drivers/char/vc_screen.c4
-rw-r--r--drivers/char/vt.c13
-rw-r--r--drivers/char/vt_ioctl.c1
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/connector/cn_queue.c2
-rw-r--r--drivers/connector/connector.c4
-rw-r--r--drivers/cpufreq/cpufreq.c99
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c43
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c77
-rw-r--r--drivers/edac/amd64_edac.c34
-rw-r--r--drivers/edac/amd64_edac.h2
-rw-r--r--drivers/edac/edac_core.h4
-rw-r--r--drivers/edac/edac_mc_sysfs.c4
-rw-r--r--drivers/edac/mpc85xx_edac.c6
-rw-r--r--drivers/edac/mpc85xx_edac.h1
-rw-r--r--drivers/edac/x38_edac.c2
-rw-r--r--drivers/firewire/core-card.c14
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firewire/core-iso.c24
-rw-r--r--drivers/firewire/core.h3
-rw-r--r--drivers/firewire/sbp2.c10
-rw-r--r--drivers/gpio/Kconfig6
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/pl061.c20
-rw-r--r--drivers/gpio/vr41xx_giu.c (renamed from drivers/char/vr41xx_giu.c)335
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c8
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/drm_gem.c17
-rw-r--r--drivers/gpu/drm/drm_stub.c3
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo.h4
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c25
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c25
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c25
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c87
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h43
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c59
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debugfs.c46
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c8
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c242
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h124
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c253
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c54
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h45
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c42
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1226
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1318
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h144
-rw-r--r--drivers/gpu/drm/i915/intel_dp_i2c.c273
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h20
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c16
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c18
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c85
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c430
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c14
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c438
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h1
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c75
-rw-r--r--drivers/gpu/drm/radeon/Makefile3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c293
-rw-r--r--drivers/gpu/drm/radeon/r100.c773
-rw-r--r--drivers/gpu/drm/radeon/r300.c78
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r520.c22
-rw-r--r--drivers/gpu/drm/radeon/r600.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h87
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h32
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c74
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c359
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c103
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c45
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c687
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c188
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h51
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c188
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_share.h39
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c209
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c24
-rw-r--r--drivers/gpu/drm/radeon/rs400.c30
-rw-r--r--drivers/gpu/drm/radeon/rs600.c7
-rw-r--r--drivers/gpu/drm/radeon/rs690.c479
-rw-r--r--drivers/gpu/drm/radeon/rs690r.h99
-rw-r--r--drivers/gpu/drm/radeon/rv515.c799
-rw-r--r--drivers/gpu/drm/radeon/rv515r.h170
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c70
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c70
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c26
-rw-r--r--drivers/gpu/drm/via/via_irq.c6
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c4
-rw-r--r--drivers/hwmon/abituguru3.c6
-rw-r--r--drivers/hwmon/asus_atk0110.c6
-rw-r--r--drivers/hwmon/max6650.c1
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/smsc47m1.c11
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c26
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c48
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c5
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-simtec.c2
-rw-r--r--drivers/i2c/chips/tsl2550.c17
-rw-r--r--drivers/ide/cs5520.c1
-rw-r--r--drivers/ide/ide-acpi.c37
-rw-r--r--drivers/ide/ide-cd.c24
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-disk.c1
-rw-r--r--drivers/ide/ide-dma.c21
-rw-r--r--drivers/ide/ide-eh.c2
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-io.c14
-rw-r--r--drivers/ide/ide-ioctls.c3
-rw-r--r--drivers/ide/ide-iops.c4
-rw-r--r--drivers/ide/ide-pm.c30
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/ide/ide-tape.c1
-rw-r--r--drivers/ieee1394/sbp2.c1
-rw-r--r--drivers/ieee1394/sbp2.h8
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/joydev.c2
-rw-r--r--drivers/input/joystick/xpad.c26
-rw-r--r--drivers/input/keyboard/Kconfig294
-rw-r--r--drivers/input/keyboard/Makefile33
-rw-r--r--drivers/input/keyboard/atkbd.c32
-rw-r--r--drivers/input/keyboard/gpio_keys.c33
-rw-r--r--drivers/input/keyboard/matrix_keypad.c453
-rw-r--r--drivers/input/misc/cobalt_btns.c4
-rw-r--r--drivers/input/misc/pcspkr.c8
-rw-r--r--drivers/input/misc/wistron_btns.c25
-rw-r--r--drivers/input/mouse/gpio_mouse.c11
-rw-r--r--drivers/input/serio/hp_sdc_mlc.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h36
-rw-r--r--drivers/input/serio/i8042.c7
-rw-r--r--drivers/input/serio/serio.c7
-rw-r--r--drivers/input/tablet/wacom_wac.c6
-rw-r--r--drivers/isdn/gigaset/ev-layer.c44
-rw-r--r--drivers/isdn/gigaset/interface.c2
-rw-r--r--drivers/isdn/gigaset/isocdata.c6
-rw-r--r--drivers/isdn/hisax/hfc_usb.c1
-rw-r--r--drivers/isdn/i4l/isdn_tty.c1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c6
-rw-r--r--drivers/isdn/mISDN/stack.c1
-rw-r--r--drivers/leds/Kconfig14
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-alix2.c7
-rw-r--r--drivers/leds/leds-bd2802.c96
-rw-r--r--drivers/leds/leds-cobalt-raq.c2
-rw-r--r--drivers/leds/leds-gpio.c22
-rw-r--r--drivers/leds/leds-lp3944.c466
-rw-r--r--drivers/leds/leds-pca9532.c58
-rw-r--r--drivers/lguest/core.c119
-rw-r--r--drivers/lguest/hypercalls.c145
-rw-r--r--drivers/lguest/interrupts_and_traps.c288
-rw-r--r--drivers/lguest/lg.h36
-rw-r--r--drivers/lguest/lguest_device.c160
-rw-r--r--drivers/lguest/lguest_user.c236
-rw-r--r--drivers/lguest/page_tables.c489
-rw-r--r--drivers/lguest/segments.c106
-rw-r--r--drivers/lguest/x86/core.c374
-rw-r--r--drivers/lguest/x86/switcher_32.S22
-rw-r--r--drivers/macintosh/macio_asic.c11
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-delay.c4
-rw-r--r--drivers/md/dm-exception-store.c9
-rw-r--r--drivers/md/dm-linear.c2
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-raid1.c3
-rw-r--r--drivers/md/dm-stripe.c7
-rw-r--r--drivers/md/dm-table.c17
-rw-r--r--drivers/md/dm.c14
-rw-r--r--drivers/md/dm.h1
-rw-r--r--drivers/md/linear.c6
-rw-r--r--drivers/md/md.c251
-rw-r--r--drivers/md/md.h12
-rw-r--r--drivers/md/multipath.c12
-rw-r--r--drivers/md/raid0.c10
-rw-r--r--drivers/md/raid1.c16
-rw-r--r--drivers/md/raid10.c23
-rw-r--r--drivers/md/raid5.c87
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c13
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c67
-rw-r--r--drivers/media/dvb/bt8xx/dst_ca.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h1
-rw-r--r--drivers/media/dvb/frontends/af9013.c25
-rw-r--r--drivers/media/dvb/ttpci/Kconfig1
-rw-r--r--drivers/media/dvb/ttpci/av7110.c1
-rw-r--r--drivers/media/radio/radio-mr800.c1
-rw-r--r--drivers/media/radio/radio-si470x.c6
-rw-r--r--drivers/media/video/Kconfig8
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c92
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/cx18/cx18-cards.c34
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c160
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c5
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c30
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c1
-rw-r--r--drivers/media/video/cx23885/cx23885.h4
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c1
-rw-r--r--drivers/media/video/cx88/cx88-video.c1
-rw-r--r--drivers/media/video/dabusb.c1
-rw-r--r--drivers/media/video/em28xx/Kconfig2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c182
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c18
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c74
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c84
-rw-r--r--drivers/media/video/em28xx/em28xx.h32
-rw-r--r--drivers/media/video/gspca/Kconfig16
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/conex.c2
-rw-r--r--drivers/media/video/gspca/gspca.c73
-rw-r--r--drivers/media/video/gspca/gspca.h9
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c6
-rw-r--r--drivers/media/video/gspca/mars.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c2434
-rw-r--r--drivers/media/video/gspca/sonixj.c4
-rw-r--r--drivers/media/video/gspca/spca500.c2
-rw-r--r--drivers/media/video/gspca/stk014.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h4
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c16
-rw-r--r--drivers/media/video/gspca/sunplus.c2
-rw-r--r--drivers/media/video/gspca/zc3xx.c2
-rw-r--r--drivers/media/video/mt9v011.c496
-rw-r--r--drivers/media/video/mt9v011.h35
-rw-r--r--drivers/media/video/pwc/pwc-if.c1
-rw-r--r--drivers/media/video/pwc/pwc.h1
-rw-r--r--drivers/media/video/s2255drv.c1
-rw-r--r--drivers/media/video/saa5246a.c1
-rw-r--r--drivers/media/video/saa5249.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c1
-rw-r--r--drivers/media/video/se401.c1
-rw-r--r--drivers/media/video/soc_camera.c12
-rw-r--r--drivers/media/video/stk-webcam.c1
-rw-r--r--drivers/media/video/stradis.c1
-rw-r--r--drivers/media/video/stv680.c1
-rw-r--r--drivers/media/video/usbvideo/vicam.c1
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c1
-rw-r--r--drivers/media/video/v4l2-dev.c1
-rw-r--r--drivers/media/video/vivi.c99
-rw-r--r--drivers/media/video/zoran/zoran_driver.c1
-rw-r--r--drivers/mfd/dm355evm_msp.c3
-rw-r--r--drivers/mfd/ezx-pcap.c4
-rw-r--r--drivers/mfd/sm501.c3
-rw-r--r--drivers/mfd/twl4030-irq.c55
-rw-r--r--drivers/misc/cb710/sgbuf2.c4
-rw-r--r--drivers/misc/eeprom/at25.c4
-rw-r--r--drivers/misc/sgi-gru/grufile.c1
-rw-r--r--drivers/misc/sgi-gru/grukservices.c1
-rw-r--r--drivers/misc/sgi-xp/xpnet.c4
-rw-r--r--drivers/mmc/host/cb710-mmc.c6
-rw-r--r--drivers/mmc/host/imxmmc.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/mvsdio.c4
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/sdhci-of.c10
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c2
-rw-r--r--drivers/mtd/inftlcore.c11
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/integrator-flash.c22
-rw-r--r--drivers/mtd/maps/sbc8240.c250
-rw-r--r--drivers/mtd/mtd_blkdevs.c6
-rw-r--r--drivers/mtd/mtdblock.c16
-rw-r--r--drivers/mtd/mtdcore.c7
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c7
-rw-r--r--drivers/mtd/nftlcore.c16
-rw-r--r--drivers/mtd/onenand/omap2.c3
-rw-r--r--drivers/mtd/ubi/build.c6
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/mtd/ubi/debug.h7
-rw-r--r--drivers/mtd/ubi/gluebi.c1
-rw-r--r--drivers/mtd/ubi/io.c157
-rw-r--r--drivers/mtd/ubi/scan.c14
-rw-r--r--drivers/mtd/ubi/scan.h2
-rw-r--r--drivers/mtd/ubi/ubi-media.h12
-rw-r--r--drivers/mtd/ubi/ubi.h6
-rw-r--r--drivers/mtd/ubi/wl.c8
-rw-r--r--drivers/net/3c515.c4
-rw-r--r--drivers/net/3c59x.c10
-rw-r--r--drivers/net/8139too.c1
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/a2065.c12
-rw-r--r--drivers/net/arcnet/arcnet.c26
-rw-r--r--drivers/net/arm/Kconfig8
-rw-r--r--drivers/net/arm/Makefile1
-rw-r--r--drivers/net/arm/at91_ether.c3
-rw-r--r--drivers/net/arm/ixp4xx_eth.c4
-rw-r--r--drivers/net/arm/w90p910_ether.c1105
-rw-r--r--drivers/net/at1700.c2
-rw-r--r--drivers/net/atl1c/atl1c.h8
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/benet/be.h2
-rw-r--r--drivers/net/benet/be_ethtool.c4
-rw-r--r--drivers/net/benet/be_hw.h4
-rw-r--r--drivers/net/benet/be_main.c69
-rw-r--r--drivers/net/bmac.c7
-rw-r--r--drivers/net/bnx2x.h2
-rw-r--r--drivers/net/bnx2x_link.c3
-rw-r--r--drivers/net/bnx2x_main.c29
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/can/dev.c8
-rw-r--r--drivers/net/can/sja1000/sja1000.c1
-rw-r--r--drivers/net/cnic.c23
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/cs89x0.c7
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c3
-rw-r--r--drivers/net/davinci_emac.c12
-rw-r--r--drivers/net/dl2k.c17
-rw-r--r--drivers/net/e100.c3
-rw-r--r--drivers/net/e1000/e1000_main.c11
-rw-r--r--drivers/net/e1000e/defines.h3
-rw-r--r--drivers/net/e1000e/hw.h4
-rw-r--r--drivers/net/e1000e/ich8lan.c270
-rw-r--r--drivers/net/e1000e/lib.c6
-rw-r--r--drivers/net/e1000e/netdev.c3
-rw-r--r--drivers/net/e1000e/phy.c12
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/eexpress.c6
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c5
-rw-r--r--drivers/net/epic100.c5
-rw-r--r--drivers/net/fealnx.c18
-rw-r--r--drivers/net/fec.c1
-rw-r--r--drivers/net/fec.h12
-rw-r--r--drivers/net/forcedeth.c25
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c20
-rw-r--r--drivers/net/fsl_pq_mdio.c8
-rw-r--r--drivers/net/gianfar.c26
-rw-r--r--drivers/net/gianfar_ethtool.c10
-rw-r--r--drivers/net/hamachi.c23
-rw-r--r--drivers/net/hamradio/6pack.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/ibm_newemac/rgmii.c7
-rw-r--r--drivers/net/igb/e1000_82575.c4
-rw-r--r--drivers/net/igb/igb_main.c91
-rw-r--r--drivers/net/igbvf/vf.c4
-rw-r--r--drivers/net/irda/bfin_sir.c16
-rw-r--r--drivers/net/irda/irtty-sir.c1
-rw-r--r--drivers/net/isa-skeleton.c5
-rw-r--r--drivers/net/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c67
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c36
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c165
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h8
-rw-r--r--drivers/net/jazzsonic.c1
-rw-r--r--drivers/net/ks8851.c1322
-rw-r--r--drivers/net/ks8851.h296
-rw-r--r--drivers/net/macsonic.c15
-rw-r--r--drivers/net/mdio.c4
-rw-r--r--drivers/net/mlx4/cmd.c5
-rw-r--r--drivers/net/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/mlx4/en_tx.c1
-rw-r--r--drivers/net/mlx4/main.c6
-rw-r--r--drivers/net/natsemi.c4
-rw-r--r--drivers/net/ne.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h20
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c13
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c9
-rw-r--r--drivers/net/netxen/netxen_nic_init.c123
-rw-r--r--drivers/net/netxen/netxen_nic_main.c77
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/3c589_cs.c21
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c13
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c15
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1
-rw-r--r--drivers/net/pcnet32.c80
-rw-r--r--drivers/net/phy/mdio-gpio.c77
-rw-r--r--drivers/net/phy/phy.c25
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/plip.c3
-rw-r--r--drivers/net/ppp_async.c1
-rw-r--r--drivers/net/ppp_generic.c34
-rw-r--r--drivers/net/ppp_synctty.c1
-rw-r--r--drivers/net/pppoe.c1
-rw-r--r--drivers/net/pppol2tp.c1
-rw-r--r--drivers/net/ps3_gelic_net.c1
-rw-r--r--drivers/net/ps3_gelic_wireless.c1
-rw-r--r--drivers/net/qlge/qlge.h2
-rw-r--r--drivers/net/qlge/qlge_ethtool.c4
-rw-r--r--drivers/net/qlge/qlge_main.c153
-rw-r--r--drivers/net/qlge/qlge_mpi.c6
-rw-r--r--drivers/net/r6040.c9
-rw-r--r--drivers/net/r8169.c13
-rw-r--r--drivers/net/s6gmac.c2
-rw-r--r--drivers/net/sc92031.c1
-rw-r--r--drivers/net/sh_eth.c9
-rw-r--r--drivers/net/skge.c4
-rw-r--r--drivers/net/sky2.c39
-rw-r--r--drivers/net/sky2.h1
-rw-r--r--drivers/net/smc91x.c1
-rw-r--r--drivers/net/smc91x.h3
-rw-r--r--drivers/net/smsc911x.c7
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/sundance.c6
-rw-r--r--drivers/net/sunvnet.c2
-rw-r--r--drivers/net/tokenring/ibmtr.c2
-rw-r--r--drivers/net/tsi108_eth.c8
-rw-r--r--drivers/net/tulip/de2104x.c7
-rw-r--r--drivers/net/tulip/de4x5.c6
-rw-r--r--drivers/net/tulip/tulip_core.c14
-rw-r--r--drivers/net/tulip/winbond-840.c6
-rw-r--r--drivers/net/tun.c20
-rw-r--r--drivers/net/ucc_geth.c23
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/cdc-phonet.c461
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/dm9601.c10
-rw-r--r--drivers/net/usb/kaweth.c3
-rw-r--r--drivers/net/usb/net1080.c12
-rw-r--r--drivers/net/usb/pegasus.c3
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/smsc95xx.c10
-rw-r--r--drivers/net/usb/usbnet.c30
-rw-r--r--drivers/net/veth.c41
-rw-r--r--drivers/net/via-rhine.c1
-rw-r--r--drivers/net/wan/hd64570.c3
-rw-r--r--drivers/net/wan/hd64572.c3
-rw-r--r--drivers/net/wan/sbni.c8
-rw-r--r--drivers/net/wireless/airo.c13
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/ath/regd.c17
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c7
-rw-r--r--drivers/net/wireless/b43/pcmcia.c1
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h1
-rw-r--r--drivers/net/wireless/b43legacy/main.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c11
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c8
-rw-r--r--drivers/net/wireless/libertas/11d.c2
-rw-r--r--drivers/net/wireless/libertas/assoc.c18
-rw-r--r--drivers/net/wireless/libertas/cmd.c8
-rw-r--r--drivers/net/wireless/libertas/defs.h2
-rw-r--r--drivers/net/wireless/libertas/scan.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c7
-rw-r--r--drivers/net/wireless/orinoco/main.c3
-rw-r--r--drivers/net/wireless/p54/p54common.c51
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/ray_cs.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c3
-rw-r--r--drivers/net/wireless/wavelan_cs.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/yellowfin.c23
-rw-r--r--drivers/of/of_mdio.c42
-rw-r--r--drivers/oprofile/oprofile_stats.c1
-rw-r--r--drivers/parisc/ccio-dma.c8
-rw-r--r--drivers/parisc/dino.c14
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/eisa_enumerator.c14
-rw-r--r--drivers/parisc/gsc.c4
-rw-r--r--drivers/parisc/gsc.h2
-rw-r--r--drivers/parisc/hppb.c9
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c41
-rw-r--r--drivers/parisc/pdc_stable.c2
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/parisc/superio.c6
-rw-r--r--drivers/parport/parport_pc.c5
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c1
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c1
-rw-r--r--drivers/pci/hotplug/cpqphp_sysfs.c1
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c1
-rw-r--r--drivers/pci/intel-iommu.c800
-rw-r--r--drivers/pci/iova.c26
-rw-r--r--drivers/pci/msi.c64
-rw-r--r--drivers/pci/msi.h10
-rw-r--r--drivers/pci/pci.c15
-rw-r--r--drivers/pci/pcie/aer/ecrc.c2
-rw-r--r--drivers/pci/quirks.c5
-rw-r--r--drivers/pci/setup-res.c4
-rw-r--r--drivers/pci/slot.c4
-rw-r--r--drivers/pci/syscall.c1
-rw-r--r--drivers/pcmcia/tcic.c3
-rw-r--r--drivers/pcmcia/vrc4171_card.c4
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c4
-rw-r--r--drivers/pcmcia/vrc4173_cardu.h2
-rw-r--r--drivers/platform/x86/Kconfig26
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c355
-rw-r--r--drivers/platform/x86/hp-wmi.c14
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c390
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ds2782_battery.c330
-rw-r--r--drivers/power/olpc_battery.c26
-rw-r--r--drivers/power/wm97xx_battery.c4
-rw-r--r--drivers/rtc/rtc-bfin.c30
-rw-r--r--drivers/rtc/rtc-cmos.c23
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-vr41xx.c4
-rw-r--r--drivers/s390/block/dasd_eckd.c45
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c25
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/dasd_ioctl.c1
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/monreader.c4
-rw-r--r--drivers/s390/char/sclp_rw.h5
-rw-r--r--drivers/s390/char/vmwatchdog.c8
-rw-r--r--drivers/s390/crypto/ap_bus.c9
-rw-r--r--drivers/s390/scsi/zfcp_erp.c68
-rw-r--r--drivers/s390/scsi/zfcp_fc.c8
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c56
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c25
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c7
-rw-r--r--drivers/scsi/atari_NCR5380.c3
-rw-r--r--drivers/scsi/cxgb3i/Kbuild2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c4
-rw-r--r--drivers/scsi/fnic/fnic_main.c8
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/libfc/fc_exch.c23
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_expander.c147
-rw-r--r--drivers/scsi/libsas/sas_port.c19
-rw-r--r--drivers/scsi/mac53c94.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c15
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h9
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h7
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c133
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c145
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c40
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_transport_fc.c5
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/sd.c20
-rw-r--r--drivers/scsi/sg.c10
-rw-r--r--drivers/scsi/sun3_NCR5380.c3
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/serial/8250_pci.c17
-rw-r--r--drivers/serial/atmel_serial.c2
-rw-r--r--drivers/serial/bfin_sport_uart.c1
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm2.c2
-rw-r--r--drivers/serial/msm_serial.c1
-rw-r--r--drivers/serial/s3c2400.c8
-rw-r--r--drivers/serial/s3c2410.c8
-rw-r--r--drivers/serial/s3c2412.c8
-rw-r--r--drivers/serial/s3c2440.c8
-rw-r--r--drivers/serial/s3c24a0.c8
-rw-r--r--drivers/serial/s3c6400.c8
-rw-r--r--drivers/serial/serial_ks8695.c2
-rw-r--r--drivers/serial/sh-sci.c11
-rw-r--r--drivers/serial/vr41xx_siu.c2
-rw-r--r--drivers/spi/omap2_mcspi.c32
-rw-r--r--drivers/spi/omap_uwire.c2
-rw-r--r--drivers/spi/spi_bitbang.c24
-rw-r--r--drivers/spi/spidev.c17
-rw-r--r--drivers/ssb/driver_mipscore.c85
-rw-r--r--drivers/ssb/pcmcia.c10
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/lowmemorykiller.c8
-rw-r--r--drivers/staging/b3dfg/Kconfig1
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/s626.c35
-rw-r--r--drivers/staging/go7007/s2250-loader.c1
-rw-r--r--drivers/staging/heci/Kconfig1
-rw-r--r--drivers/staging/meilhaus/TODO2
-rw-r--r--drivers/staging/rspiusb/rspiusb.c16
-rw-r--r--drivers/staging/rt2860/rt_linux.h13
-rw-r--r--drivers/staging/rt2870/2870_main_dev.c67
-rw-r--r--drivers/staging/rt2870/common/2870_rtmp_init.c33
-rw-r--r--drivers/staging/rt2870/common/rtusb_io.c3
-rw-r--r--drivers/staging/rt2870/rt2870.h10
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c12
-rw-r--r--drivers/staging/rtl8192su/Kconfig2
-rw-r--r--drivers/staging/rtl8192su/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_module.c1
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c14
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c3
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c23
-rw-r--r--drivers/staging/rtl8192su/r8192U_pm.c8
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c26
-rw-r--r--drivers/staging/stlc45xx/stlc45xx.c27
-rw-r--r--drivers/staging/uc2322/Kconfig10
-rw-r--r--drivers/staging/uc2322/Makefile1
-rw-r--r--drivers/staging/uc2322/TODO7
-rw-r--r--drivers/staging/uc2322/aten2011.c2430
-rw-r--r--drivers/staging/udlfb/udlfb.c1
-rw-r--r--drivers/staging/usbip/usbip_common.c1
-rw-r--r--drivers/staging/vt6655/device_main.c10
-rw-r--r--drivers/telephony/ixj.c1
-rw-r--r--drivers/telephony/phonedev.c1
-rw-r--r--drivers/usb/class/cdc-acm.c37
-rw-r--r--drivers/usb/class/cdc-acm.h2
-rw-r--r--drivers/usb/class/cdc-wdm.c1
-rw-r--r--drivers/usb/class/usbtmc.c10
-rw-r--r--drivers/usb/core/Kconfig2
-rw-r--r--drivers/usb/core/config.c48
-rw-r--r--drivers/usb/core/devices.c10
-rw-r--r--drivers/usb/core/devio.c88
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hcd.h4
-rw-r--r--drivers/usb/core/hub.c40
-rw-r--r--drivers/usb/core/hub.h6
-rw-r--r--drivers/usb/core/message.c63
-rw-r--r--drivers/usb/gadget/Kconfig43
-rw-r--r--drivers/usb/gadget/amd5536udc.c1
-rw-r--r--drivers/usb/gadget/audio.c6
-rw-r--r--drivers/usb/gadget/ether.c11
-rw-r--r--drivers/usb/gadget/langwell_udc.c1
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c24
-rw-r--r--drivers/usb/gadget/rndis.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c1
-rw-r--r--drivers/usb/host/Kconfig33
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c40
-rw-r--r--drivers/usb/host/ehci-ixp4xx.c2
-rw-r--r--drivers/usb/host/ehci-orion.c4
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c155
-rw-r--r--drivers/usb/host/ehci-sched.c13
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/fhci-sched.c8
-rw-r--r--drivers/usb/host/isp1760-if.c2
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/r8a66597-hcd.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c199
-rw-r--r--drivers/usb/host/xhci-hcd.c290
-rw-r--r--drivers/usb/host/xhci-mem.c300
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-ring.c305
-rw-r--r--drivers/usb/host/xhci.h148
-rw-r--r--drivers/usb/misc/Kconfig2
-rw-r--r--drivers/usb/misc/iowarrior.c1
-rw-r--r--drivers/usb/misc/rio500.c1
-rw-r--r--drivers/usb/misc/usblcd.c1
-rw-r--r--drivers/usb/mon/mon_bin.c2
-rw-r--r--drivers/usb/musb/Kconfig1
-rw-r--r--drivers/usb/musb/cppi_dma.h1
-rw-r--r--drivers/usb/musb/davinci.c32
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/musb/musb_core.h1
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c2
-rw-r--r--drivers/usb/musb/musb_host.c27
-rw-r--r--drivers/usb/musb/musb_regs.h1
-rw-r--r--drivers/usb/otg/Kconfig14
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/langwell_otg.c1915
-rw-r--r--drivers/usb/otg/nop-usb-xceiv.c1
-rw-r--r--drivers/usb/serial/console.c13
-rw-r--r--drivers/usb/serial/cp210x.c5
-rw-r--r--drivers/usb/serial/cypress_m8.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c74
-rw-r--r--drivers/usb/serial/ftdi_sio.h39
-rw-r--r--drivers/usb/serial/generic.c20
-rw-r--r--drivers/usb/serial/mos7720.c2
-rw-r--r--drivers/usb/serial/mos7840.c10
-rw-r--r--drivers/usb/serial/option.c178
-rw-r--r--drivers/usb/serial/pl2303.c60
-rw-r--r--drivers/usb/serial/pl2303.h8
-rw-r--r--drivers/usb/serial/sierra.c51
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c7
-rw-r--r--drivers/usb/serial/usb-serial.c43
-rw-r--r--drivers/usb/storage/option_ms.c3
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/Kconfig5
-rw-r--r--drivers/video/amba-clcd.c7
-rw-r--r--drivers/video/atafb.c8
-rw-r--r--drivers/video/atmel_lcdfb.c3
-rw-r--r--drivers/video/aty/atyfb.h3
-rw-r--r--drivers/video/aty/atyfb_base.c141
-rw-r--r--drivers/video/aty/mach64_accel.c7
-rw-r--r--drivers/video/backlight/jornada720_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c2
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/console/fbcon.c6
-rw-r--r--drivers/video/console/fbcon_rotate.h2
-rw-r--r--drivers/video/console/sticore.c9
-rw-r--r--drivers/video/fbmem.c14
-rw-r--r--drivers/video/fbmon.c4
-rw-r--r--drivers/video/fsl-diu-fb.c20
-rw-r--r--drivers/video/hitfb.c66
-rw-r--r--drivers/video/i810/i810_main.c5
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c4
-rw-r--r--drivers/video/matrox/matroxfb_Ti3026.c4
-rw-r--r--drivers/video/matrox/matroxfb_base.c4
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c7
-rw-r--r--drivers/video/mx3fb.c92
-rw-r--r--drivers/video/omap/omapfb_main.c18
-rw-r--r--drivers/video/platinumfb.c2
-rw-r--r--drivers/video/pxafb.c2
-rw-r--r--drivers/video/s3c-fb.c21
-rw-r--r--drivers/video/sh7760fb.c19
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c91
-rw-r--r--drivers/video/sis/sis_main.c3
-rw-r--r--drivers/video/sm501fb.c26
-rw-r--r--drivers/video/stifb.c7
-rw-r--r--drivers/video/via/hw.c4
-rw-r--r--drivers/video/via/lcd.c15
-rw-r--r--drivers/video/via/viafbdev.c101
-rw-r--r--drivers/video/via/viafbdev.h3
-rw-r--r--drivers/video/w100fb.c4
-rw-r--r--drivers/virtio/virtio_pci.c242
-rw-r--r--drivers/vlynq/Kconfig2
-rw-r--r--drivers/vlynq/vlynq.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c1
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c2
-rw-r--r--drivers/watchdog/coh901327_wdt.c11
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/ks8695_wdt.c4
-rw-r--r--drivers/watchdog/sa1100_wdt.c5
-rw-r--r--drivers/watchdog/w83627hf_wdt.c5
-rw-r--r--drivers/watchdog/w83697ug_wdt.c4
-rw-r--r--drivers/watchdog/wdrtas.c8
-rw-r--r--drivers/xen/events.c4
847 files changed, 26605 insertions, 13037 deletions
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 7a0f4aa4fa1e..9a62224cc278 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -38,6 +38,9 @@
 
 #define _COMPONENT		ACPI_MEMORY_DEVICE_COMPONENT
 
+#undef PREFIX
+#define PREFIX		"ACPI:memory_hp:"
+
 ACPI_MODULE_NAME("acpi_memhotplug");
 MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
 MODULE_DESCRIPTION("Hotplug Mem Driver");
@@ -153,6 +156,7 @@ acpi_memory_get_device(acpi_handle handle,
 	acpi_handle phandle;
 	struct acpi_device *device = NULL;
 	struct acpi_device *pdevice = NULL;
+	int result;
 
 
 	if (!acpi_bus_get_device(handle, &device) && device)
@@ -165,9 +169,9 @@ acpi_memory_get_device(acpi_handle handle,
 	}
 
 	/* Get the parent device */
-	status = acpi_bus_get_device(phandle, &pdevice);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Cannot get acpi bus device"));
+	result = acpi_bus_get_device(phandle, &pdevice);
+	if (result) {
+		printk(KERN_WARNING PREFIX "Cannot get acpi bus device");
 		return -EINVAL;
 	}
 
@@ -175,9 +179,9 @@ acpi_memory_get_device(acpi_handle handle,
 	 * Now add the notified device. This creates the acpi_device
 	 * and invokes .add function
 	 */
-	status = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Cannot add acpi bus"));
+	result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
+	if (result) {
+		printk(KERN_WARNING PREFIX "Cannot add acpi bus");
 		return -EINVAL;
 	}
 
@@ -238,7 +242,12 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
 			num_enabled++;
 			continue;
 		}
-
+		/*
+		 * If the memory block size is zero, please ignore it.
+		 * Don't try to do the following memory hotplug flowchart.
+		 */
+		if (!info->length)
+			continue;
 		if (node < 0)
 			node = memory_add_physaddr_to_nid(info->start_addr);
 
@@ -253,8 +262,15 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
 		mem_device->state = MEMORY_INVALID_STATE;
 		return -EINVAL;
 	}
-
-	return result;
+	/*
+	 * Sometimes the memory device will contain several memory blocks.
+	 * When one memory block is hot-added to the system memory, it will
+	 * be regarded as a success.
+	 * Otherwise if the last memory block can't be hot-added to the system
+	 * memory, it will be failure and the memory device can't be bound with
+	 * driver.
+	 */
+	return 0;
 }
 
 static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 544dcf834922..eb6f038b03d9 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -97,6 +97,7 @@
 #define AOPOBJ_OBJECT_INITIALIZED	0x08
 #define AOPOBJ_SETUP_COMPLETE		0x10
 #define AOPOBJ_SINGLE_DATUM		0x20
+#define AOPOBJ_INVALID			0x40	/* Used if host OS won't allow an op_region address */
 
 /******************************************************************************
  *
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 584d766e6f12..b79978f7bc71 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -397,6 +397,30 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
 	status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
 					   extra_desc->extra.aml_length,
 					   extra_desc->extra.aml_start);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Validate the region address/length via the host OS */
+
+	status = acpi_os_validate_address(obj_desc->region.space_id,
+					  obj_desc->region.address,
+					  (acpi_size) obj_desc->region.length,
+					  acpi_ut_get_node_name(node));
+
+	if (ACPI_FAILURE(status)) {
+		/*
+		 * Invalid address/length. We will emit an error message and mark
+		 * the region as invalid, so that it will cause an additional error if
+		 * it is ever used. Then return AE_OK.
+		 */
+		ACPI_EXCEPTION((AE_INFO, status,
+				"During address validation of OpRegion [%4.4s]",
+				node->name.ascii));
+		obj_desc->common.flags |= AOPOBJ_INVALID;
+		status = AE_OK;
+	}
+
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index d4075b821021..6687be167f5f 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -113,6 +113,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
 		}
 	}
 
+	/* Exit if Address/Length have been disallowed by the host OS */
+
+	if (rgn_desc->common.flags & AOPOBJ_INVALID) {
+		return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
+	}
+
 	/*
 	 * Exit now for SMBus address space, it has a non-linear address space
 	 * and the request cannot be directly validated
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 71670719d61a..5691f165a952 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -189,11 +189,36 @@ acpi_status __init acpi_os_initialize(void)
 	return AE_OK;
 }
 
+static void bind_to_cpu0(struct work_struct *work)
+{
+	set_cpus_allowed(current, cpumask_of_cpu(0));
+	kfree(work);
+}
+
+static void bind_workqueue(struct workqueue_struct *wq)
+{
+	struct work_struct *work;
+
+	work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
+	INIT_WORK(work, bind_to_cpu0);
+	queue_work(wq, work);
+}
+
 acpi_status acpi_os_initialize1(void)
 {
+	/*
+	 * On some machines, a software-initiated SMI causes corruption unless
+	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
+	 * typically it's done in GPE-related methods that are run via
+	 * workqueues, so we can avoid the known corruption cases by binding
+	 * the workqueues to CPU 0.
+	 */
 	kacpid_wq = create_singlethread_workqueue("kacpid");
+	bind_workqueue(kacpid_wq);
 	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
+	bind_workqueue(kacpi_notify_wq);
 	kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
+	bind_workqueue(kacpi_hotplug_wq);
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
 	BUG_ON(!kacpi_hotplug_wq);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 8a5bf3b356fa..55b5b90c2a44 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -395,7 +395,7 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
 		fn = adr & 0xffff;
 
 		pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
-		if (hnd == handle)
+		if (!pdev || hnd == handle)
 			break;
 
 		pbus = pdev->subordinate;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 01574a066534..42159a28f433 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -397,6 +397,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 		},
 	},
 	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Hewlett-Packard HP G7000 Notebook PC",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
+		},
+	},
+	{
 	.callback = init_old_suspend_ordering,
 	.ident = "Panasonic CF51-2L",
 	.matches = {
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 0944daec064f..9c61ab2177cf 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -121,7 +121,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
 	table_attr->attr.size = 0;
 	table_attr->attr.read = acpi_table_show;
 	table_attr->attr.attr.name = table_attr->name;
-	table_attr->attr.attr.mode = 0444;
+	table_attr->attr.attr.mode = 0400;
 
 	return;
 }
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 3d763fdf99b7..246650673010 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -207,6 +207,16 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
 	void __iomem *tmp;
 	int i, ret;
 
+	device_initialize(&dev->dev);
+
+	/*
+	 * Copy from device_add
+	 */
+	if (dev->dev.init_name) {
+		dev_set_name(&dev->dev, "%s", dev->dev.init_name);
+		dev->dev.init_name = NULL;
+	}
+
 	dev->dev.release = amba_device_release;
 	dev->dev.bus = &amba_bustype;
 	dev->dev.dma_mask = &dev->dma_mask;
@@ -240,7 +250,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
 			goto err_release;
 	}
 
-	ret = device_register(&dev->dev);
+	ret = device_add(&dev->dev);
 	if (ret)
 		goto err_release;
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 15a23031833f..958c1fa41900 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -513,11 +513,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d0a14cf2bd74..56b8a3ff1286 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -596,9 +596,12 @@ static const struct ich_laptop ich_laptop[] = {
 	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
 	{ 0x27DF, 0x1025, 0x0102 },	/* ICH7 on Acer 5602aWLMi */
 	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
+	{ 0x27DF, 0x1028, 0x02b0 },	/* ICH7 on unknown Dell */
 	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
 	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
+	{ 0x27DF, 0x103C, 0x361a },	/* ICH7 on unkown HP */
 	{ 0x27DF, 0x1071, 0xD221 },	/* ICH7 on Hercules EC-900 */
+	{ 0x27DF, 0x152D, 0x0778 },	/* ICH7 on unknown Intel */
 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
 	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
 	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 045a486a09ea..8ac98ff16d7d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1515,6 +1515,7 @@ static int ata_hpa_resize(struct ata_device *dev)
 
 		return rc;
 	}
+	dev->n_native_sectors = native_sectors;
 
 	/* nothing to do? */
 	if (native_sectors <= sectors || !ata_ignore_hpa) {
@@ -3392,17 +3393,27 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
 
 static int ata_dev_set_mode(struct ata_device *dev)
 {
+	struct ata_port *ap = dev->link->ap;
 	struct ata_eh_context *ehc = &dev->link->eh_context;
+	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
 	const char *dev_err_whine = "";
 	int ign_dev_err = 0;
-	unsigned int err_mask;
+	unsigned int err_mask = 0;
 	int rc;
 
 	dev->flags &= ~ATA_DFLAG_PIO;
 	if (dev->xfer_shift == ATA_SHIFT_PIO)
 		dev->flags |= ATA_DFLAG_PIO;
 
-	err_mask = ata_dev_set_xfermode(dev);
+	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
+		dev_err_whine = " (SET_XFERMODE skipped)";
+	else {
+		if (nosetxfer)
+			ata_dev_printk(dev, KERN_WARNING,
+				       "NOSETXFER but PATA detected - can't "
+				       "skip SETXFER, might malfunction\n");
+		err_mask = ata_dev_set_xfermode(dev);
+	}
 
 	if (err_mask & ~AC_ERR_DEV)
 		goto fail;
@@ -4089,6 +4100,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
 			unsigned int readid_flags)
 {
 	u64 n_sectors = dev->n_sectors;
+	u64 n_native_sectors = dev->n_native_sectors;
 	int rc;
 
 	if (!ata_dev_enabled(dev))
@@ -4118,16 +4130,30 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
 	/* verify n_sectors hasn't changed */
 	if (dev->class == ATA_DEV_ATA && n_sectors &&
 	    dev->n_sectors != n_sectors) {
-		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
+		ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch "
 			       "%llu != %llu\n",
 			       (unsigned long long)n_sectors,
 			       (unsigned long long)dev->n_sectors);
-
-		/* restore original n_sectors */
-		dev->n_sectors = n_sectors;
-
-		rc = -ENODEV;
-		goto fail;
+		/*
+		 * Something could have caused HPA to be unlocked
+		 * involuntarily.  If n_native_sectors hasn't changed
+		 * and the new size matches it, keep the device.
+		 */
+		if (dev->n_native_sectors == n_native_sectors &&
+		    dev->n_sectors > n_sectors &&
+		    dev->n_sectors == n_native_sectors) {
+			ata_dev_printk(dev, KERN_WARNING,
+				       "new n_sectors matches native, probably "
+				       "late HPA unlock, continuing\n");
+			/* keep using the old n_sectors */
+			dev->n_sectors = n_sectors;
+		} else {
+			/* restore original n_[native]_sectors and fail */
+			dev->n_native_sectors = n_native_sectors;
+			dev->n_sectors = n_sectors;
+			rc = -ENODEV;
+			goto fail;
+		}
 	}
 
 	return 0;
@@ -4297,6 +4323,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	/* Devices which aren't very happy with higher link speeds */
 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
 
+	/*
+	 * Devices which choke on SETXFER.  Applies only if both the
+	 * device and controller are SATA.
+	 */
+	{ "PIONEER DVD-RW DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
+
 	/* End Marker */
 	{ }
 };
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index fa22f94ca415..79711b64054b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2327,7 +2327,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	struct ata_port *ap = link->ap;
 	struct ata_link *slave = ap->slave_link;
 	struct ata_eh_context *ehc = &link->eh_context;
-	struct ata_eh_context *sehc = &slave->eh_context;
+	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
 	unsigned int *classes = ehc->classes;
 	unsigned int lflags = link->flags;
 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
@@ -2517,6 +2517,10 @@ int ata_eh_reset(struct ata_link *link, int classify,
 
 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 			rc = ata_do_reset(link, reset, classes, deadline, true);
+			if (rc) {
+				failed_link = link;
+				goto fail;
+			}
 		}
 	} else {
 		if (verbose)
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 4b27617be26d..5702affcb325 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -26,9 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 
-#include <mach/at91sam9260_matrix.h>
 #include <mach/at91sam9_smc.h>
-#include <mach/at91sam9260.h>
 #include <mach/board.h>
 #include <mach/gpio.h>
 
@@ -44,65 +42,62 @@ struct at91_ide_info {
 	unsigned long mode;
 	unsigned int cs;
 
+	struct clk *mck;
+
 	void __iomem *ide_addr;
 	void __iomem *alt_addr;
 };
 
-const struct ata_timing initial_timing =
+static const struct ata_timing initial_timing =
 	{XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
 
-static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
+static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
 {
 	unsigned long mul;
 
 	/*
	 * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
	 *     x * (f / 1_000_000_000) =
	 *     x * ((f * 65536) / 1_000_000_000) / 65536 =
	 *     x * (((f / 10_000) * 65536) / 100_000) / 65536 =
	 */
 
 	mul = (mck_hz / 10000) << 16;
 	mul /= 100000;
 
 	return (ns * mul + 65536) >> 16; /* rounding */
 }
 
 static void set_smc_mode(struct at91_ide_info *info)
 {
 	at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
 	return;
 }
 
 static void set_smc_timing(struct device *dev,
 			   struct at91_ide_info *info, const struct ata_timing *ata)
 {
-	int read_cycle, write_cycle, active, recover;
-	int nrd_setup, nrd_pulse, nrd_recover;
-	int nwe_setup, nwe_pulse;
+	unsigned long read_cycle, write_cycle, active, recover;
+	unsigned long nrd_setup, nrd_pulse, nrd_recover;
+	unsigned long nwe_setup, nwe_pulse;
 
-	int ncs_write_setup, ncs_write_pulse;
-	int ncs_read_setup, ncs_read_pulse;
+	unsigned long ncs_write_setup, ncs_write_pulse;
+	unsigned long ncs_read_setup, ncs_read_pulse;
 
-	unsigned int mck_hz;
-	struct clk *mck;
+	unsigned long mck_hz;
 
 	read_cycle  = ata->cyc8b;
 	nrd_setup   = ata->setup;
 	nrd_pulse   = ata->act8b;
 	nrd_recover = ata->rec8b;
 
-	mck = clk_get(NULL, "mck");
-	BUG_ON(IS_ERR(mck));
-	mck_hz = clk_get_rate(mck);
+	mck_hz = clk_get_rate(info->mck);
 
 	read_cycle  = calc_mck_cycles(read_cycle, mck_hz);
 	nrd_setup   = calc_mck_cycles(nrd_setup, mck_hz);
 	nrd_pulse   = calc_mck_cycles(nrd_pulse, mck_hz);
 	nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
 
-	clk_put(mck);
-
 	active  = nrd_setup + nrd_pulse;
 	recover = read_cycle - active;
 
@@ -121,13 +116,13 @@ static void set_smc_timing(struct device *dev,
 	ncs_write_setup = ncs_read_setup;
 	ncs_write_pulse = ncs_read_pulse;
 
-	dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n",
+	dev_dbg(dev, "ATA timings: nrd_setup = %lu nrd_pulse = %lu nrd_cycle = %lu\n",
 		nrd_setup, nrd_pulse, read_cycle);
-	dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n",
+	dev_dbg(dev, "ATA timings: nwe_setup = %lu nwe_pulse = %lu nwe_cycle = %lu\n",
 		nwe_setup, nwe_pulse, write_cycle);
-	dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n",
+	dev_dbg(dev, "ATA timings: ncs_read_setup = %lu ncs_read_pulse = %lu\n",
 		ncs_read_setup, ncs_read_pulse);
-	dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n",
+	dev_dbg(dev, "ATA timings: ncs_write_setup = %lu ncs_write_pulse = %lu\n",
 		ncs_write_setup, ncs_write_pulse);
 
 	at91_sys_write(AT91_SMC_SETUP(info->cs),
@@ -217,6 +212,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
 	struct resource *mem_res;
 	struct ata_host *host;
219 struct ata_port *ap; 214 struct ata_port *ap;
215
220 int irq_flags = 0; 216 int irq_flags = 0;
221 int irq = 0; 217 int irq = 0;
222 int ret; 218 int ret;
@@ -261,6 +257,13 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
261 return -ENOMEM; 257 return -ENOMEM;
262 } 258 }
263 259
260 info->mck = clk_get(NULL, "mck");
261
262 if (IS_ERR(info->mck)) {
263 dev_err(dev, "failed to get access to mck clock\n");
264 return -ENODEV;
265 }
266
264 info->cs = board->chipselect; 267 info->cs = board->chipselect;
265 info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | 268 info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
266 AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | 269 AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT |
@@ -304,6 +307,7 @@ err_alt_ioremap:
304 devm_iounmap(dev, info->ide_addr); 307 devm_iounmap(dev, info->ide_addr);
305 308
306err_ide_ioremap: 309err_ide_ioremap:
310 clk_put(info->mck);
307 kfree(info); 311 kfree(info);
308 312
309 return ret; 313 return ret;
@@ -312,11 +316,12 @@ err_ide_ioremap:
312static int __devexit pata_at91_remove(struct platform_device *pdev) 316static int __devexit pata_at91_remove(struct platform_device *pdev)
313{ 317{
314 struct ata_host *host = dev_get_drvdata(&pdev->dev); 318 struct ata_host *host = dev_get_drvdata(&pdev->dev);
315 struct at91_ide_info *info = host->private_data; 319 struct at91_ide_info *info;
316 struct device *dev = &pdev->dev; 320 struct device *dev = &pdev->dev;
317 321
318 if (!host) 322 if (!host)
319 return 0; 323 return 0;
324 info = host->private_data;
320 325
321 ata_host_detach(host); 326 ata_host_detach(host);
322 327
@@ -325,6 +330,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev)
325 330
326 devm_iounmap(dev, info->ide_addr); 331 devm_iounmap(dev, info->ide_addr);
327 devm_iounmap(dev, info->alt_addr); 332 devm_iounmap(dev, info->alt_addr);
333 clk_put(info->mck);
328 334
329 kfree(info); 335 kfree(info);
330 return 0; 336 return 0;
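The fixed-point arithmetic in calc_mck_cycles() above converts a nanosecond timing into master-clock cycles. A minimal user-space sketch of that conversion, assuming an illustrative 100 MHz master clock (this example is not part of the patch):

#include <stdio.h>

/* Same math as calc_mck_cycles() in pata_at91.c:
 * cycles = ns * (mck_hz / 1e9), done in 16.16 fixed point with rounding. */
static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
{
	unsigned long mul = (mck_hz / 10000) << 16;

	mul /= 100000;
	return (ns * mul + 65536) >> 16;
}

int main(void)
{
	/* 290 ns at 100 MHz (10 ns per cycle) -> prints 29 */
	printf("%lu\n", calc_mck_cycles(290, 100000000UL));
	return 0;
}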
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 8d9343accf3c..abdd19fe990a 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -653,7 +653,8 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
653 653
654 ap = host->ports[i]; 654 ap = host->ports[i];
655 ocd = ap->dev->platform_data; 655 ocd = ap->dev->platform_data;
656 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) 656
657 if (ap->flags & ATA_FLAG_DISABLED)
657 continue; 658 continue;
658 659
659 ocd = ap->dev->platform_data; 660 ocd = ap->dev->platform_data;
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index f4d009ed50ac..dc99e26f8e5b 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -411,6 +411,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
411 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), 411 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
412 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), 412 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
413 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), 413 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
414 PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
414 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), 415 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
415 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), 416 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
416 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), 417 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 23714aefb825..c19417e02208 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2514,7 +2514,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2514 char *when = "idle"; 2514 char *when = "idle";
2515 2515
2516 ata_ehi_clear_desc(ehi); 2516 ata_ehi_clear_desc(ehi);
2517 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { 2517 if (ap->flags & ATA_FLAG_DISABLED) {
2518 when = "disabled"; 2518 when = "disabled";
2519 } else if (edma_was_enabled) { 2519 } else if (edma_was_enabled) {
2520 when = "EDMA enabled"; 2520 when = "EDMA enabled";
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 030ec079b184..35bd5cc7f285 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -532,7 +532,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
532 struct ata_port *ap = host->ports[i]; 532 struct ata_port *ap = host->ports[i];
533 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); 533 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
534 534
535 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) 535 if (unlikely(ap->flags & ATA_FLAG_DISABLED))
536 continue; 536 continue;
537 537
538 /* turn off SATA_IRQ if not supported */ 538 /* turn off SATA_IRQ if not supported */
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index e8beb8e5b626..05dd307e8f02 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -428,6 +428,9 @@ int devres_release_all(struct device *dev)
428{ 428{
429 unsigned long flags; 429 unsigned long flags;
430 430
431 /* Looks like an uninitialized device structure */
432 if (WARN_ON(dev->devres_head.next == NULL))
433 return -ENODEV;
431 spin_lock_irqsave(&dev->devres_lock, flags); 434 spin_lock_irqsave(&dev->devres_lock, flags);
432 return release_nodes(dev, dev->devres_head.next, &dev->devres_head, 435 return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
433 flags); 436 flags);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index ddeb819c8f87..7376367bcb80 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -180,7 +180,6 @@ static ssize_t firmware_loading_store(struct device *dev,
180 goto err; 180 goto err;
181 } 181 }
182 /* Pages will be freed by vfree() */ 182 /* Pages will be freed by vfree() */
183 fw_priv->pages = NULL;
184 fw_priv->page_array_size = 0; 183 fw_priv->page_array_size = 0;
185 fw_priv->nr_pages = 0; 184 fw_priv->nr_pages = 0;
186 complete(&fw_priv->completion); 185 complete(&fw_priv->completion);
@@ -217,8 +216,10 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
217 ret_count = -ENODEV; 216 ret_count = -ENODEV;
218 goto out; 217 goto out;
219 } 218 }
220 if (offset > fw->size) 219 if (offset > fw->size) {
221 return 0; 220 ret_count = 0;
221 goto out;
222 }
222 if (count > fw->size - offset) 223 if (count > fw->size - offset)
223 count = fw->size - offset; 224 count = fw->size - offset;
224 225
@@ -357,7 +358,7 @@ static void fw_dev_release(struct device *dev)
357 kfree(fw_priv->pages); 358 kfree(fw_priv->pages);
358 kfree(fw_priv->fw_id); 359 kfree(fw_priv->fw_id);
359 kfree(fw_priv); 360 kfree(fw_priv);
360 put_device(dev); 361 kfree(dev);
361 362
362 module_put(THIS_MODULE); 363 module_put(THIS_MODULE);
363} 364}
@@ -408,13 +409,11 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
408 if (retval) { 409 if (retval) {
409 dev_err(device, "%s: device_register failed\n", __func__); 410 dev_err(device, "%s: device_register failed\n", __func__);
410 put_device(f_dev); 411 put_device(f_dev);
411 goto error_kfree_fw_id; 412 return retval;
412 } 413 }
413 *dev_p = f_dev; 414 *dev_p = f_dev;
414 return 0; 415 return 0;
415 416
416error_kfree_fw_id:
417 kfree(fw_priv->fw_id);
418error_kfree: 417error_kfree:
419 kfree(f_dev); 418 kfree(f_dev);
420 kfree(fw_priv); 419 kfree(fw_priv);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fae725458981..58a3e572f2c9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -762,6 +762,7 @@ static int dpm_prepare(pm_message_t state)
762 dev->power.status = DPM_ON; 762 dev->power.status = DPM_ON;
763 if (error == -EAGAIN) { 763 if (error == -EAGAIN) {
764 put_device(dev); 764 put_device(dev);
765 error = 0;
765 continue; 766 continue;
766 } 767 }
767 printk(KERN_ERR "PM: Failed to prepare device %s " 768 printk(KERN_ERR "PM: Failed to prepare device %s "
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 79a9ae5238ac..0d903909af7e 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -275,9 +275,9 @@ int sysdev_register(struct sys_device *sysdev)
275 drv->add(sysdev); 275 drv->add(sysdev);
276 } 276 }
277 mutex_unlock(&sysdev_drivers_lock); 277 mutex_unlock(&sysdev_drivers_lock);
278 kobject_uevent(&sysdev->kobj, KOBJ_ADD);
278 } 279 }
279 280
280 kobject_uevent(&sysdev->kobj, KOBJ_ADD);
281 return error; 281 return error;
282} 282}
283 283
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 668dc234b8e2..1e6b7c14f697 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -36,6 +36,7 @@
36#include <linux/ioport.h> 36#include <linux/ioport.h>
37#include <linux/mm.h> 37#include <linux/mm.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/smp_lock.h>
39#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
40#include <linux/reboot.h> 41#include <linux/reboot.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index bb72ada9f074..1d886e079c58 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -298,6 +298,22 @@ config BLK_DEV_NBD
298 298
299 If unsure, say N. 299 If unsure, say N.
300 300
301config BLK_DEV_OSD
302 tristate "OSD object-as-blkdev support"
303 depends on SCSI_OSD_ULD
304 ---help---
305 Saying Y or M here will allow the exporting of a single SCSI
306 OSD (object-based storage) object as a Linux block device.
307
308 For example, if you create a 2G object on an OSD device,
309 you can then use this module to present that 2G object as
310 a Linux block device.
311
312 To compile this driver as a module, choose M here: the
313 module will be called osdblk.
314
315 If unsure, say N.
316
301config BLK_DEV_SX8 317config BLK_DEV_SX8
302 tristate "Promise SATA SX8 support" 318 tristate "Promise SATA SX8 support"
303 depends on PCI 319 depends on PCI
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 7755a5e2a85e..cdaa3f8fddf0 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
23obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o 23obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
24obj-$(CONFIG_MG_DISK) += mg_disk.o 24obj-$(CONFIG_MG_DISK) += mg_disk.o
25obj-$(CONFIG_SUNVDC) += sunvdc.o 25obj-$(CONFIG_SUNVDC) += sunvdc.o
26obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
26 27
27obj-$(CONFIG_BLK_DEV_UMEM) += umem.o 28obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
28obj-$(CONFIG_BLK_DEV_NBD) += nbd.o 29obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 9c6e5b0fe894..2f07b7c99a95 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1645,7 +1645,7 @@ static int __init fd_probe_drives(void)
1645{ 1645{
1646 int drive,drives,nomem; 1646 int drive,drives,nomem;
1647 1647
1648 printk(KERN_INFO "FD: probing units\n" KERN_INFO "found "); 1648 printk(KERN_INFO "FD: probing units\nfound ");
1649 drives=0; 1649 drives=0;
1650 nomem=0; 1650 nomem=0;
1651 for(drive=0;drive<FD_MAX_UNITS;drive++) { 1651 for(drive=0;drive<FD_MAX_UNITS;drive++) {
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index f5e7180d7f47..3ff02941b3dd 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1627,7 +1627,7 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
1627 drive, dtp->blocks, dtp->spt, dtp->stretch); 1627 drive, dtp->blocks, dtp->spt, dtp->stretch);
1628 1628
1629 /* sanity check */ 1629 /* sanity check */
1630 if (!dtp || setprm.track != dtp->blocks/dtp->spt/2 || 1630 if (setprm.track != dtp->blocks/dtp->spt/2 ||
1631 setprm.head != 2) { 1631 setprm.head != 2) {
1632 redo_fd_request(); 1632 redo_fd_request();
1633 return -EINVAL; 1633 return -EINVAL;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c7a527c08a09..a52cc7fe45ea 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -26,6 +26,7 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/smp_lock.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
30#include <linux/major.h> 31#include <linux/major.h>
31#include <linux/fs.h> 32#include <linux/fs.h>
@@ -226,8 +227,18 @@ static inline void addQ(struct hlist_head *list, CommandList_struct *c)
226 227
227static inline void removeQ(CommandList_struct *c) 228static inline void removeQ(CommandList_struct *c)
228{ 229{
229 if (WARN_ON(hlist_unhashed(&c->list))) 230 /*
231 * After kexec/dump some commands might still
232 * be in flight, which the firmware will try
233 * to complete. Resetting the firmware doesn't work
234 * with old fw revisions, so we have to mark
235 * them off as 'stale' to prevent the driver from
236 * falling over.
237 */
238 if (WARN_ON(hlist_unhashed(&c->list))) {
239 c->cmd_type = CMD_MSG_STALE;
230 return; 240 return;
241 }
231 242
232 hlist_del_init(&c->list); 243 hlist_del_init(&c->list);
233} 244}
@@ -4246,7 +4257,8 @@ static void fail_all_cmds(unsigned long ctlr)
4246 while (!hlist_empty(&h->cmpQ)) { 4257 while (!hlist_empty(&h->cmpQ)) {
4247 c = hlist_entry(h->cmpQ.first, CommandList_struct, list); 4258 c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
4248 removeQ(c); 4259 removeQ(c);
4249 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 4260 if (c->cmd_type != CMD_MSG_STALE)
4261 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
4250 if (c->cmd_type == CMD_RWREQ) { 4262 if (c->cmd_type == CMD_RWREQ) {
4251 complete_command(h, c, 0); 4263 complete_command(h, c, 0);
4252 } else if (c->cmd_type == CMD_IOCTL_PEND) 4264 } else if (c->cmd_type == CMD_IOCTL_PEND)
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index cd665b00c7c5..dbaed1ea0da3 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -274,6 +274,7 @@ typedef struct _ErrorInfo_struct {
274#define CMD_SCSI 0x03 274#define CMD_SCSI 0x03
275#define CMD_MSG_DONE 0x04 275#define CMD_MSG_DONE 0x04
276#define CMD_MSG_TIMEOUT 0x05 276#define CMD_MSG_TIMEOUT 0x05
277#define CMD_MSG_STALE 0xff
277 278
278/* This structure needs to be divisible by 8 for new 279/* This structure needs to be divisible by 8 for new
279 * indexing method. 280 * indexing method.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 862b40c90181..91b753013780 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3327 if (!capable(CAP_SYS_ADMIN)) 3327 if (!capable(CAP_SYS_ADMIN))
3328 return -EPERM; 3328 return -EPERM;
3329 mutex_lock(&open_lock); 3329 mutex_lock(&open_lock);
3330 LOCK_FDC(drive, 1); 3330 if (lock_fdc(drive, 1)) {
3331 mutex_unlock(&open_lock);
3332 return -EINTR;
3333 }
3331 floppy_type[type] = *g; 3334 floppy_type[type] = *g;
3332 floppy_type[type].name = "user format"; 3335 floppy_type[type].name = "user format";
3333 for (cnt = type << 2; cnt < (type << 2) + 4; cnt++) 3336 for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 801f4ab83302..5757188cd1fb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -61,7 +61,6 @@
61#include <linux/blkdev.h> 61#include <linux/blkdev.h>
62#include <linux/blkpg.h> 62#include <linux/blkpg.h>
63#include <linux/init.h> 63#include <linux/init.h>
64#include <linux/smp_lock.h>
65#include <linux/swap.h> 64#include <linux/swap.h>
66#include <linux/slab.h> 65#include <linux/slab.h>
67#include <linux/loop.h> 66#include <linux/loop.h>
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f703f5478246..6d7fbaa92248 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -36,7 +36,6 @@
36 36
37/* Register offsets */ 37/* Register offsets */
38#define MG_BUFF_OFFSET 0x8000 38#define MG_BUFF_OFFSET 0x8000
39#define MG_STORAGE_BUFFER_SIZE 0x200
40#define MG_REG_OFFSET 0xC000 39#define MG_REG_OFFSET 0xC000
41#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ 40#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
42#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ 41#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
@@ -219,6 +218,16 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
219 host->error = MG_ERR_NONE; 218 host->error = MG_ERR_NONE;
220 expire = jiffies + msecs_to_jiffies(msec); 219 expire = jiffies + msecs_to_jiffies(msec);
221 220
221 /* These 2 times dummy status read prevents reading invalid
222 * status. A very little time (3 times of mflash operating clk)
223 * is required for busy bit is set. Use dummy read instead of
224 * busy wait, because mflash's PLL is machine dependent.
225 */
226 if (prv_data->use_polling) {
227 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
228 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
229 }
230
222 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); 231 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
223 232
224 do { 233 do {
@@ -245,8 +254,6 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
245 mg_dump_status("not ready", status, host); 254 mg_dump_status("not ready", status, host);
246 return MG_ERR_INV_STAT; 255 return MG_ERR_INV_STAT;
247 } 256 }
248 if (prv_data->use_polling)
249 msleep(1);
250 257
251 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); 258 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
252 } while (time_before(cur_jiffies, expire)); 259 } while (time_before(cur_jiffies, expire));
@@ -469,9 +476,18 @@ static unsigned int mg_out(struct mg_host *host,
469 return MG_ERR_NONE; 476 return MG_ERR_NONE;
470} 477}
471 478
479static void mg_read_one(struct mg_host *host, struct request *req)
480{
481 u16 *buff = (u16 *)req->buffer;
482 u32 i;
483
484 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
485 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
486 (i << 1));
487}
488
472static void mg_read(struct request *req) 489static void mg_read(struct request *req)
473{ 490{
474 u32 j;
475 struct mg_host *host = req->rq_disk->private_data; 491 struct mg_host *host = req->rq_disk->private_data;
476 492
477 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), 493 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
@@ -482,49 +498,65 @@ static void mg_read(struct request *req)
482 blk_rq_sectors(req), blk_rq_pos(req), req->buffer); 498 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
483 499
484 do { 500 do {
485 u16 *buff = (u16 *)req->buffer;
486
487 if (mg_wait(host, ATA_DRQ, 501 if (mg_wait(host, ATA_DRQ,
488 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) { 502 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
489 mg_bad_rw_intr(host); 503 mg_bad_rw_intr(host);
490 return; 504 return;
491 } 505 }
492 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) 506
493 *buff++ = inw((unsigned long)host->dev_base + 507 mg_read_one(host, req);
494 MG_BUFF_OFFSET + (j << 1));
495 508
496 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + 509 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
497 MG_REG_COMMAND); 510 MG_REG_COMMAND);
498 } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); 511 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
499} 512}
500 513
514static void mg_write_one(struct mg_host *host, struct request *req)
515{
516 u16 *buff = (u16 *)req->buffer;
517 u32 i;
518
519 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
520 outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
521 (i << 1));
522}
523
501static void mg_write(struct request *req) 524static void mg_write(struct request *req)
502{ 525{
503 u32 j;
504 struct mg_host *host = req->rq_disk->private_data; 526 struct mg_host *host = req->rq_disk->private_data;
527 unsigned int rem = blk_rq_sectors(req);
505 528
506 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), 529 if (mg_out(host, blk_rq_pos(req), rem,
507 MG_CMD_WR, NULL) != MG_ERR_NONE) { 530 MG_CMD_WR, NULL) != MG_ERR_NONE) {
508 mg_bad_rw_intr(host); 531 mg_bad_rw_intr(host);
509 return; 532 return;
510 } 533 }
511 534
512 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 535 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
513 blk_rq_sectors(req), blk_rq_pos(req), req->buffer); 536 rem, blk_rq_pos(req), req->buffer);
537
538 if (mg_wait(host, ATA_DRQ,
539 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
540 mg_bad_rw_intr(host);
541 return;
542 }
514 543
515 do { 544 do {
516 u16 *buff = (u16 *)req->buffer; 545 mg_write_one(host, req);
517 546
518 if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { 547 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
548 MG_REG_COMMAND);
549
550 rem--;
551 if (rem > 1 && mg_wait(host, ATA_DRQ,
552 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
553 mg_bad_rw_intr(host);
554 return;
555 } else if (mg_wait(host, MG_STAT_READY,
556 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
519 mg_bad_rw_intr(host); 557 mg_bad_rw_intr(host);
520 return; 558 return;
521 } 559 }
522 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
523 outw(*buff++, (unsigned long)host->dev_base +
524 MG_BUFF_OFFSET + (j << 1));
525
526 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
527 MG_REG_COMMAND);
528 } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); 560 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
529} 561}
530 562
@@ -532,7 +564,6 @@ static void mg_read_intr(struct mg_host *host)
532{ 564{
533 struct request *req = host->req; 565 struct request *req = host->req;
534 u32 i; 566 u32 i;
535 u16 *buff;
536 567
537 /* check status */ 568 /* check status */
538 do { 569 do {
@@ -550,13 +581,7 @@ static void mg_read_intr(struct mg_host *host)
550 return; 581 return;
551 582
552ok_to_read: 583ok_to_read:
553 /* get current segment of request */ 584 mg_read_one(host, req);
554 buff = (u16 *)req->buffer;
555
556 /* read 1 sector */
557 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
558 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
559 (i << 1));
560 585
561 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 586 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
562 blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); 587 blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
@@ -575,8 +600,7 @@ ok_to_read:
575static void mg_write_intr(struct mg_host *host) 600static void mg_write_intr(struct mg_host *host)
576{ 601{
577 struct request *req = host->req; 602 struct request *req = host->req;
578 u32 i, j; 603 u32 i;
579 u16 *buff;
580 bool rem; 604 bool rem;
581 605
582 /* check status */ 606 /* check status */
@@ -597,12 +621,7 @@ static void mg_write_intr(struct mg_host *host)
597ok_to_write: 621ok_to_write:
598 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) { 622 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
599 /* write 1 sector and set handler if remains */ 623 /* write 1 sector and set handler if remains */
600 buff = (u16 *)req->buffer; 624 mg_write_one(host, req);
601 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
602 outw(*buff, (unsigned long)host->dev_base +
603 MG_BUFF_OFFSET + (j << 1));
604 buff++;
605 }
606 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 625 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
607 blk_rq_pos(req), blk_rq_sectors(req), req->buffer); 626 blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
608 host->mg_do_intr = mg_write_intr; 627 host->mg_do_intr = mg_write_intr;
@@ -667,9 +686,6 @@ static unsigned int mg_issue_req(struct request *req,
667 unsigned int sect_num, 686 unsigned int sect_num,
668 unsigned int sect_cnt) 687 unsigned int sect_cnt)
669{ 688{
670 u16 *buff;
671 u32 i;
672
673 switch (rq_data_dir(req)) { 689 switch (rq_data_dir(req)) {
674 case READ: 690 case READ:
675 if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) 691 if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
@@ -693,12 +709,7 @@ static unsigned int mg_issue_req(struct request *req,
693 mg_bad_rw_intr(host); 709 mg_bad_rw_intr(host);
694 return host->error; 710 return host->error;
695 } 711 }
696 buff = (u16 *)req->buffer; 712 mg_write_one(host, req);
697 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
698 outw(*buff, (unsigned long)host->dev_base +
699 MG_BUFF_OFFSET + (i << 1));
700 buff++;
701 }
702 mod_timer(&host->timer, jiffies + 3 * HZ); 713 mod_timer(&host->timer, jiffies + 3 * HZ);
703 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 714 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
704 MG_REG_COMMAND); 715 MG_REG_COMMAND);
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
new file mode 100644
index 000000000000..13c1aee6aa3f
--- /dev/null
+++ b/drivers/block/osdblk.c
@@ -0,0 +1,701 @@
1
2/*
3 osdblk.c -- Export a single SCSI OSD object as a Linux block device
4
5
6 Copyright 2009 Red Hat, Inc.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING. If not, write to
19 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20
21
22 Instructions for use
23 --------------------
24
25 1) Map a Linux block device to an existing OSD object.
26
27 In this example, we will use partition id 1234, object id 5678,
28 OSD device /dev/osd1.
29
30 $ echo "1234 5678 /dev/osd1" > /sys/class/osdblk/add
31
32
33 2) List all active blkdev<->object mappings.
34
35 In this example, we have performed step #1 twice, creating two blkdevs,
36 mapped to two separate OSD objects.
37
38 $ cat /sys/class/osdblk/list
39 0 174 1234 5678 /dev/osd1
40 1 179 1994 897123 /dev/osd0
41
42 The columns, in order, are:
43 - blkdev unique id
44 - blkdev assigned major
45 - OSD object partition id
46 - OSD object id
47 - OSD device
48
49
50 3) Remove an active blkdev<->object mapping.
51
52 In this example, we remove the mapping with blkdev unique id 1.
53
54 $ echo 1 > /sys/class/osdblk/remove
55
56
57 NOTE: The actual creation and deletion of OSD objects is outside the scope
58 of this driver.
59
60 */
61
62#include <linux/kernel.h>
63#include <linux/device.h>
64#include <linux/module.h>
65#include <linux/fs.h>
66#include <scsi/osd_initiator.h>
67#include <scsi/osd_attributes.h>
68#include <scsi/osd_sec.h>
69#include <scsi/scsi_device.h>
70
71#define DRV_NAME "osdblk"
72#define PFX DRV_NAME ": "
73
74/* #define _OSDBLK_DEBUG */
75#ifdef _OSDBLK_DEBUG
76#define OSDBLK_DEBUG(fmt, a...) \
77 printk(KERN_NOTICE "osdblk @%s:%d: " fmt, __func__, __LINE__, ##a)
78#else
79#define OSDBLK_DEBUG(fmt, a...) \
80 do { if (0) printk(fmt, ##a); } while (0)
81#endif
82
83MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
84MODULE_DESCRIPTION("block device inside an OSD object osdblk.ko");
85MODULE_LICENSE("GPL");
86
87struct osdblk_device;
88
89enum {
90 OSDBLK_MINORS_PER_MAJOR = 256, /* max minors per blkdev */
91 OSDBLK_MAX_REQ = 32, /* max parallel requests */
92 OSDBLK_OP_TIMEOUT = 4 * 60, /* sync OSD req timeout */
93};
94
95struct osdblk_request {
96 struct request *rq; /* blk layer request */
97 struct bio *bio; /* cloned bio */
98 struct osdblk_device *osdev; /* associated blkdev */
99};
100
101struct osdblk_device {
102 int id; /* blkdev unique id */
103
104 int major; /* blkdev assigned major */
105 struct gendisk *disk; /* blkdev's gendisk and rq */
106 struct request_queue *q;
107
108 struct osd_dev *osd; /* associated OSD */
109
110 char name[32]; /* blkdev name, e.g. osdblk34 */
111
112 spinlock_t lock; /* queue lock */
113
114 struct osd_obj_id obj; /* OSD partition, obj id */
115 uint8_t obj_cred[OSD_CAP_LEN]; /* OSD cred */
116
117 struct osdblk_request req[OSDBLK_MAX_REQ]; /* request table */
118
119 struct list_head node;
120
121 char osd_path[0]; /* OSD device path */
122};
123
124static struct class *class_osdblk; /* /sys/class/osdblk */
125static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
126static LIST_HEAD(osdblkdev_list);
127
128static struct block_device_operations osdblk_bd_ops = {
129 .owner = THIS_MODULE,
130};
131
132static const struct osd_attr g_attr_logical_length = ATTR_DEF(
133 OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
134
135static void osdblk_make_credential(u8 cred_a[OSD_CAP_LEN],
136 const struct osd_obj_id *obj)
137{
138 osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
139}
140
141/* copied from exofs; move to libosd? */
142/*
143 * Perform a synchronous OSD operation. copied from exofs; move to libosd?
144 */
145static int osd_sync_op(struct osd_request *or, int timeout, uint8_t *credential)
146{
147 int ret;
148
149 or->timeout = timeout;
150 ret = osd_finalize_request(or, 0, credential, NULL);
151 if (ret)
152 return ret;
153
154 ret = osd_execute_request(or);
155
156 /* osd_req_decode_sense(or, ret); */
157 return ret;
158}
159
160/*
161 * Perform an asynchronous OSD operation. copied from exofs; move to libosd?
162 */
163static int osd_async_op(struct osd_request *or, osd_req_done_fn *async_done,
164 void *caller_context, u8 *cred)
165{
166 int ret;
167
168 ret = osd_finalize_request(or, 0, cred, NULL);
169 if (ret)
170 return ret;
171
172 ret = osd_execute_request_async(or, async_done, caller_context);
173
174 return ret;
175}
176
177/* copied from exofs; move to libosd? */
178static int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
179{
180 struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
181 void *iter = NULL;
182 int nelem;
183
184 do {
185 nelem = 1;
186 osd_req_decode_get_attr_list(or, &cur_attr, &nelem, &iter);
187 if ((cur_attr.attr_page == attr->attr_page) &&
188 (cur_attr.attr_id == attr->attr_id)) {
189 attr->len = cur_attr.len;
190 attr->val_ptr = cur_attr.val_ptr;
191 return 0;
192 }
193 } while (iter);
194
195 return -EIO;
196}
197
198static int osdblk_get_obj_size(struct osdblk_device *osdev, u64 *size_out)
199{
200 struct osd_request *or;
201 struct osd_attr attr;
202 int ret;
203
204 /* start request */
205 or = osd_start_request(osdev->osd, GFP_KERNEL);
206 if (!or)
207 return -ENOMEM;
208
209 /* create a get-attributes(length) request */
210 osd_req_get_attributes(or, &osdev->obj);
211
212 osd_req_add_get_attr_list(or, &g_attr_logical_length, 1);
213
214 /* execute op synchronously */
215 ret = osd_sync_op(or, OSDBLK_OP_TIMEOUT, osdev->obj_cred);
216 if (ret)
217 goto out;
218
219 /* extract length from returned attribute info */
220 attr = g_attr_logical_length;
221 ret = extract_attr_from_req(or, &attr);
222 if (ret)
223 goto out;
224
225 *size_out = get_unaligned_be64(attr.val_ptr);
226
227out:
228 osd_end_request(or);
229 return ret;
230
231}
232
233static void osdblk_osd_complete(struct osd_request *or, void *private)
234{
235 struct osdblk_request *orq = private;
236 struct osd_sense_info osi;
237 int ret = osd_req_decode_sense(or, &osi);
238
239 if (ret) {
240 ret = -EIO;
241 OSDBLK_DEBUG("osdblk_osd_complete with err=%d\n", ret);
242 }
243
244 /* complete OSD request */
245 osd_end_request(or);
246
247 /* complete request passed to osdblk by block layer */
248 __blk_end_request_all(orq->rq, ret);
249}
250
251static void bio_chain_put(struct bio *chain)
252{
253 struct bio *tmp;
254
255 while (chain) {
256 tmp = chain;
257 chain = chain->bi_next;
258
259 bio_put(tmp);
260 }
261}
262
263static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
264{
265 struct bio *tmp, *new_chain = NULL, *tail = NULL;
266
267 while (old_chain) {
268 tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
269 if (!tmp)
270 goto err_out;
271
272 __bio_clone(tmp, old_chain);
273 tmp->bi_bdev = NULL;
274 gfpmask &= ~__GFP_WAIT;
275 tmp->bi_next = NULL;
276
277 if (!new_chain)
278 new_chain = tail = tmp;
279 else {
280 tail->bi_next = tmp;
281 tail = tmp;
282 }
283
284 old_chain = old_chain->bi_next;
285 }
286
287 return new_chain;
288
289err_out:
290 OSDBLK_DEBUG("bio_chain_clone with err\n");
291 bio_chain_put(new_chain);
292 return NULL;
293}
294
295static void osdblk_rq_fn(struct request_queue *q)
296{
297 struct osdblk_device *osdev = q->queuedata;
298
299 while (1) {
300 struct request *rq;
301 struct osdblk_request *orq;
302 struct osd_request *or;
303 struct bio *bio;
304 bool do_write, do_flush;
305
306 /* peek at request from block layer */
307 rq = blk_fetch_request(q);
308 if (!rq)
309 break;
310
311 /* filter out block requests we don't understand */
312 if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) {
313 blk_end_request_all(rq, 0);
314 continue;
315 }
316
317 /* deduce our operation (read, write, flush) */
318 /* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
319 * into a clearly defined set of RPC commands:
320 * read, write, flush, scsi command, power mgmt req,
321 * driver-specific, etc.
322 */
323
324 do_flush = (rq->special == (void *) 0xdeadbeefUL);
325 do_write = (rq_data_dir(rq) == WRITE);
326
327 if (!do_flush) { /* osd_flush does not use a bio */
328 /* a bio clone to be passed down to OSD request */
329 bio = bio_chain_clone(rq->bio, GFP_ATOMIC);
330 if (!bio)
331 break;
332 } else
333 bio = NULL;
334
335 /* alloc internal OSD request, for OSD command execution */
336 or = osd_start_request(osdev->osd, GFP_ATOMIC);
337 if (!or) {
338 bio_chain_put(bio);
339 OSDBLK_DEBUG("osd_start_request with err\n");
340 break;
341 }
342
343 orq = &osdev->req[rq->tag];
344 orq->rq = rq;
345 orq->bio = bio;
346 orq->osdev = osdev;
347
348 /* init OSD command: flush, write or read */
349 if (do_flush)
350 osd_req_flush_object(or, &osdev->obj,
351 OSD_CDB_FLUSH_ALL, 0, 0);
352 else if (do_write)
353 osd_req_write(or, &osdev->obj, blk_rq_pos(rq) * 512ULL,
354 bio, blk_rq_bytes(rq));
355 else
356 osd_req_read(or, &osdev->obj, blk_rq_pos(rq) * 512ULL,
357 bio, blk_rq_bytes(rq));
358
359 OSDBLK_DEBUG("%s 0x%x bytes at 0x%llx\n",
360 do_flush ? "flush" : do_write ?
361 "write" : "read", blk_rq_bytes(rq),
362 blk_rq_pos(rq) * 512ULL);
363
364 /* begin OSD command execution */
365 if (osd_async_op(or, osdblk_osd_complete, orq,
366 osdev->obj_cred)) {
367 osd_end_request(or);
368 blk_requeue_request(q, rq);
369 bio_chain_put(bio);
370 OSDBLK_DEBUG("osd_execute_request_async with err\n");
371 break;
372 }
373
374 /* remove the special 'flush' marker, now that the command
375 * is executing
376 */
377 rq->special = NULL;
378 }
379}
380
381static void osdblk_prepare_flush(struct request_queue *q, struct request *rq)
382{
383 /* add driver-specific marker, to indicate that this request
384 * is a flush command
385 */
386 rq->special = (void *) 0xdeadbeefUL;
387}
388
389static void osdblk_free_disk(struct osdblk_device *osdev)
390{
391 struct gendisk *disk = osdev->disk;
392
393 if (!disk)
394 return;
395
396 if (disk->flags & GENHD_FL_UP)
397 del_gendisk(disk);
398 if (disk->queue)
399 blk_cleanup_queue(disk->queue);
400 put_disk(disk);
401}
402
403static int osdblk_init_disk(struct osdblk_device *osdev)
404{
405 struct gendisk *disk;
406 struct request_queue *q;
407 int rc;
408 u64 obj_size = 0;
409
410 /* contact OSD, request size info about the object being mapped */
411 rc = osdblk_get_obj_size(osdev, &obj_size);
412 if (rc)
413 return rc;
414
415 /* create gendisk info */
416 disk = alloc_disk(OSDBLK_MINORS_PER_MAJOR);
417 if (!disk)
418 return -ENOMEM;
419
420 sprintf(disk->disk_name, DRV_NAME "%d", osdev->id);
421 disk->major = osdev->major;
422 disk->first_minor = 0;
423 disk->fops = &osdblk_bd_ops;
424 disk->private_data = osdev;
425
426 /* init rq */
427 q = blk_init_queue(osdblk_rq_fn, &osdev->lock);
428 if (!q) {
429 put_disk(disk);
430 return -ENOMEM;
431 }
432
433 /* switch queue to TCQ mode; allocate tag map */
434 rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL);
435 if (rc) {
436 blk_cleanup_queue(q);
437 put_disk(disk);
438 return rc;
439 }
440
441 /* Set our limits to the lower device limits, because osdblk cannot
442 * sleep when allocating a lower-request and therefore cannot be
443 * bouncing.
444 */
445 blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
446
447 blk_queue_prep_rq(q, blk_queue_start_tag);
448 blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush);
449
450 disk->queue = q;
451
452 q->queuedata = osdev;
453
454 osdev->disk = disk;
455 osdev->q = q;
456
457 /* finally, announce the disk to the world */
458 set_capacity(disk, obj_size / 512ULL);
459 add_disk(disk);
460
461 printk(KERN_INFO "%s: Added of size 0x%llx\n",
462 disk->disk_name, (unsigned long long)obj_size);
463
464 return 0;
465}
466
467/********************************************************************
468 * /sys/class/osdblk/
469 * add map OSD object to blkdev
470 * remove unmap OSD object
471 * list show mappings
472 *******************************************************************/
473
474static void class_osdblk_release(struct class *cls)
475{
476 kfree(cls);
477}
478
479static ssize_t class_osdblk_list(struct class *c, char *data)
480{
481 int n = 0;
482 struct list_head *tmp;
483
484 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
485
486 list_for_each(tmp, &osdblkdev_list) {
487 struct osdblk_device *osdev;
488
489 osdev = list_entry(tmp, struct osdblk_device, node);
490
491 n += sprintf(data+n, "%d %d %llu %llu %s\n",
492 osdev->id,
493 osdev->major,
494 osdev->obj.partition,
495 osdev->obj.id,
496 osdev->osd_path);
497 }
498
499 mutex_unlock(&ctl_mutex);
500 return n;
501}
502
503static ssize_t class_osdblk_add(struct class *c, const char *buf, size_t count)
504{
505 struct osdblk_device *osdev;
506 ssize_t rc;
507 int irc, new_id = 0;
508 struct list_head *tmp;
509
510 if (!try_module_get(THIS_MODULE))
511 return -ENODEV;
512
513 /* new osdblk_device object */
514 osdev = kzalloc(sizeof(*osdev) + strlen(buf) + 1, GFP_KERNEL);
515 if (!osdev) {
516 rc = -ENOMEM;
517 goto err_out_mod;
518 }
519
520 /* static osdblk_device initialization */
521 spin_lock_init(&osdev->lock);
522 INIT_LIST_HEAD(&osdev->node);
523
524 /* generate unique id: find highest unique id, add one */
525
526 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
527
528 list_for_each(tmp, &osdblkdev_list) {
529 struct osdblk_device *osdev;
530
531 osdev = list_entry(tmp, struct osdblk_device, node);
532 if (osdev->id > new_id)
533 new_id = osdev->id + 1;
534 }
535
536 osdev->id = new_id;
537
538 /* add to global list */
539 list_add_tail(&osdev->node, &osdblkdev_list);
540
541 mutex_unlock(&ctl_mutex);
542
543 /* parse add command */
544 if (sscanf(buf, "%llu %llu %s", &osdev->obj.partition, &osdev->obj.id,
545 osdev->osd_path) != 3) {
546 rc = -EINVAL;
547 goto err_out_slot;
548 }
549
550 /* initialize rest of new object */
551 sprintf(osdev->name, DRV_NAME "%d", osdev->id);
552
553 /* contact requested OSD */
554 osdev->osd = osduld_path_lookup(osdev->osd_path);
555 if (IS_ERR(osdev->osd)) {
556 rc = PTR_ERR(osdev->osd);
557 goto err_out_slot;
558 }
559
560 /* build OSD credential */
561 osdblk_make_credential(osdev->obj_cred, &osdev->obj);
562
563 /* register our block device */
564 irc = register_blkdev(0, osdev->name);
565 if (irc < 0) {
566 rc = irc;
567 goto err_out_osd;
568 }
569
570 osdev->major = irc;
571
572 /* set up and announce blkdev mapping */
573 rc = osdblk_init_disk(osdev);
574 if (rc)
575 goto err_out_blkdev;
576
577 return count;
578
579err_out_blkdev:
580 unregister_blkdev(osdev->major, osdev->name);
581err_out_osd:
582 osduld_put_device(osdev->osd);
583err_out_slot:
584 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
585 list_del_init(&osdev->node);
586 mutex_unlock(&ctl_mutex);
587
588 kfree(osdev);
589err_out_mod:
590 OSDBLK_DEBUG("Error adding device %s\n", buf);
591 module_put(THIS_MODULE);
592 return rc;
593}
594
595static ssize_t class_osdblk_remove(struct class *c, const char *buf,
596 size_t count)
597{
598 struct osdblk_device *osdev = NULL;
599 int target_id, rc;
600 unsigned long ul;
601 struct list_head *tmp;
602
603 rc = strict_strtoul(buf, 10, &ul);
604 if (rc)
605 return rc;
606
607 /* convert to int; abort if we lost anything in the conversion */
608 target_id = (int) ul;
609 if (target_id != ul)
610 return -EINVAL;
611
612 /* remove object from list immediately */
613 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
614
615 list_for_each(tmp, &osdblkdev_list) {
616 osdev = list_entry(tmp, struct osdblk_device, node);
617 if (osdev->id == target_id) {
618 list_del_init(&osdev->node);
619 break;
620 }
621 osdev = NULL;
622 }
623
624 mutex_unlock(&ctl_mutex);
625
626 if (!osdev)
627 return -ENOENT;
628
629 /* clean up and free blkdev and associated OSD connection */
630 osdblk_free_disk(osdev);
631 unregister_blkdev(osdev->major, osdev->name);
632 osduld_put_device(osdev->osd);
633 kfree(osdev);
634
635 /* release module ref */
636 module_put(THIS_MODULE);
637
638 return count;
639}
640
641static struct class_attribute class_osdblk_attrs[] = {
642 __ATTR(add, 0200, NULL, class_osdblk_add),
643 __ATTR(remove, 0200, NULL, class_osdblk_remove),
644 __ATTR(list, 0444, class_osdblk_list, NULL),
645 __ATTR_NULL
646};
647
648static int osdblk_sysfs_init(void)
649{
650 int ret = 0;
651
652 /*
653 * create control files in sysfs
654 * /sys/class/osdblk/...
655 */
656 class_osdblk = kzalloc(sizeof(*class_osdblk), GFP_KERNEL);
657 if (!class_osdblk)
658 return -ENOMEM;
659
660 class_osdblk->name = DRV_NAME;
661 class_osdblk->owner = THIS_MODULE;
662 class_osdblk->class_release = class_osdblk_release;
663 class_osdblk->class_attrs = class_osdblk_attrs;
664
665 ret = class_register(class_osdblk);
666 if (ret) {
667 kfree(class_osdblk);
668 class_osdblk = NULL;
669 printk(PFX "failed to create class osdblk\n");
670 return ret;
671 }
672
673 return 0;
674}
675
676static void osdblk_sysfs_cleanup(void)
677{
678 if (class_osdblk)
679 class_destroy(class_osdblk);
680 class_osdblk = NULL;
681}
682
683static int __init osdblk_init(void)
684{
685 int rc;
686
687 rc = osdblk_sysfs_init();
688 if (rc)
689 return rc;
690
691 return 0;
692}
693
694static void __exit osdblk_exit(void)
695{
696 osdblk_sysfs_cleanup();
697}
698
699module_init(osdblk_init);
700module_exit(osdblk_exit);
701
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 83650e00632d..99a506f619b7 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1372,8 +1372,10 @@ try_next_bio:
1372 wakeup = (pd->write_congestion_on > 0 1372 wakeup = (pd->write_congestion_on > 0
1373 && pd->bio_queue_size <= pd->write_congestion_off); 1373 && pd->bio_queue_size <= pd->write_congestion_off);
1374 spin_unlock(&pd->lock); 1374 spin_unlock(&pd->lock);
1375 if (wakeup) 1375 if (wakeup) {
1376 clear_bdi_congested(&pd->disk->queue->backing_dev_info, WRITE); 1376 clear_bdi_congested(&pd->disk->queue->backing_dev_info,
1377 BLK_RW_ASYNC);
1378 }
1377 1379
1378 pkt->sleep_time = max(PACKET_WAIT_TIME, 1); 1380 pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
1379 pkt_set_state(pkt, PACKET_WAITING_STATE); 1381 pkt_set_state(pkt, PACKET_WAITING_STATE);
@@ -2592,10 +2594,10 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
2592 spin_lock(&pd->lock); 2594 spin_lock(&pd->lock);
2593 if (pd->write_congestion_on > 0 2595 if (pd->write_congestion_on > 0
2594 && pd->bio_queue_size >= pd->write_congestion_on) { 2596 && pd->bio_queue_size >= pd->write_congestion_on) {
2595 set_bdi_congested(&q->backing_dev_info, WRITE); 2597 set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
2596 do { 2598 do {
2597 spin_unlock(&pd->lock); 2599 spin_unlock(&pd->lock);
2598 congestion_wait(WRITE, HZ); 2600 congestion_wait(BLK_RW_ASYNC, HZ);
2599 spin_lock(&pd->lock); 2601 spin_lock(&pd->lock);
2600 } while(pd->bio_queue_size > pd->write_congestion_off); 2602 } while(pd->bio_queue_size > pd->write_congestion_off);
2601 } 2603 }
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 43db3ea15b54..aa1a3d5a3e2b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -213,7 +213,7 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
213 * Only allow the generic SCSI ioctls if the host can support it. 213 * Only allow the generic SCSI ioctls if the host can support it.
214 */ 214 */
215 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) 215 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
216 return -ENOIOCTLCMD; 216 return -ENOTTY;
217 217
218 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); 218 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
219} 219}
@@ -360,6 +360,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
360 blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2); 360 blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
361 blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2); 361 blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
362 362
363 /* No need to bounce any requests */
364 blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
365
363 /* No real sector limit. */ 366 /* No real sector limit. */
364 blk_queue_max_sectors(vblk->disk->queue, -1U); 367 blk_queue_max_sectors(vblk->disk->queue, -1U);
365 368
@@ -424,7 +427,12 @@ static unsigned int features[] = {
424 VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY 427 VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
425}; 428};
426 429
427static struct virtio_driver virtio_blk = { 430/*
431 * virtio_blk causes spurious section mismatch warning by
432 * simultaneously referring to a __devinit and a __devexit function.
433 * Use __refdata to avoid this warning.
434 */
435static struct virtio_driver __refdata virtio_blk = {
428 .feature_table = features, 436 .feature_table = features,
429 .feature_table_size = ARRAY_SIZE(features), 437 .feature_table_size = ARRAY_SIZE(features),
430 .driver.name = KBUILD_MODNAME, 438 .driver.name = KBUILD_MODNAME,
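The __refdata comment added above refers to a modpost section-mismatch warning that fires when an ordinary data object points at both __devinit and __devexit code. A minimal, hypothetical sketch of that pattern (a made-up "foo" driver, not the actual virtio_blk code), assuming the 2.6.31-era struct virtio_driver layout:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/virtio.h>

static int __devinit foo_probe(struct virtio_device *vdev)
{
	return 0;	/* hypothetical probe */
}

static void __devexit foo_remove(struct virtio_device *vdev)
{
}

/* Marked __refdata so modpost accepts references from this data object
 * into .devinit.text and .devexit.text. */
static struct virtio_driver __refdata foo_driver = {
	.driver.name	= "foo",
	.driver.owner	= THIS_MODULE,
	.probe		= foo_probe,
	.remove		= __devexit_p(foo_remove),
};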
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index f08491a3a813..b20abe102a2b 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -390,9 +390,10 @@ static inline void ace_dump_mem(void *base, int len)
390 390
391static void ace_dump_regs(struct ace_device *ace) 391static void ace_dump_regs(struct ace_device *ace)
392{ 392{
393 dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" 393 dev_info(ace->dev,
394 KERN_INFO " status:%.8x mpu_lba:%.8x busmode:%4x\n" 394 " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
395 KERN_INFO " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", 395 " status:%.8x mpu_lba:%.8x busmode:%4x\n"
396 " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
396 ace_in32(ace, ACE_CTRL), 397 ace_in32(ace, ACE_CTRL),
397 ace_in(ace, ACE_SECCNTCMD), 398 ace_in(ace, ACE_SECCNTCMD),
398 ace_in(ace, ACE_VERSION), 399 ace_in(ace, ACE_VERSION),
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 4575171e5beb..b2590409f25e 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -374,7 +374,7 @@ err:
374static void __exit z2_exit(void) 374static void __exit z2_exit(void)
375{ 375{
376 int i, j; 376 int i, j;
377 blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256); 377 blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT);
378 unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME); 378 unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
379 del_gendisk(z2ram_gendisk); 379 del_gendisk(z2ram_gendisk);
380 put_disk(z2ram_gendisk); 380 put_disk(z2ram_gendisk);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 1df9dda2e377..d5cde6d86f89 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -28,7 +28,6 @@
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/smp_lock.h>
32#include <linux/types.h> 31#include <linux/types.h>
33#include <linux/errno.h> 32#include <linux/errno.h>
34#include <linux/sched.h> 33#include <linux/sched.h>
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0bd01f49cfd8..6a06913b01d3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1029,10 +1029,6 @@ config CS5535_GPIO
1029 1029
1030 If compiled as a module, it will be called cs5535_gpio. 1030 If compiled as a module, it will be called cs5535_gpio.
1031 1031
1032config GPIO_VR41XX
1033 tristate "NEC VR4100 series General-purpose I/O Unit support"
1034 depends on CPU_VR41XX
1035
1036config RAW_DRIVER 1032config RAW_DRIVER
1037 tristate "RAW driver (/dev/raw/rawN)" 1033 tristate "RAW driver (/dev/raw/rawN)"
1038 depends on BLOCK 1034 depends on BLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 189efcff08ce..66f779ad4f4c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -95,7 +95,6 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
95obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o 95obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
96obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o 96obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
97obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o 97obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
98obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
99obj-$(CONFIG_GPIO_TB0219) += tb0219.o 98obj-$(CONFIG_GPIO_TB0219) += tb0219.o
100obj-$(CONFIG_TELCLOCK) += tlclk.o 99obj-$(CONFIG_TELCLOCK) += tlclk.o
101 100
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index f4bb43fb8016..e077701ae3d9 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -225,7 +225,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
225 .configure = parisc_agp_configure, 225 .configure = parisc_agp_configure,
226 .fetch_size = parisc_agp_fetch_size, 226 .fetch_size = parisc_agp_fetch_size,
227 .tlb_flush = parisc_agp_tlbflush, 227 .tlb_flush = parisc_agp_tlbflush,
228 .mask_memory = parisc_agp_mask_memory, 228 .mask_memory = parisc_agp_page_mask_memory,
229 .masks = parisc_agp_masks, 229 .masks = parisc_agp_masks,
230 .agp_enable = parisc_agp_enable, 230 .agp_enable = parisc_agp_enable,
231 .cache_flush = global_cache_flush, 231 .cache_flush = global_cache_flush,
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 72429b6b2fa8..6c32fbf07164 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -81,6 +81,7 @@ static char *serial_version = "4.30";
81#include <linux/mm.h> 81#include <linux/mm.h>
82#include <linux/seq_file.h> 82#include <linux/seq_file.h>
83#include <linux/slab.h> 83#include <linux/slab.h>
84#include <linux/smp_lock.h>
84#include <linux/init.h> 85#include <linux/init.h>
85#include <linux/bitops.h> 86#include <linux/bitops.h>
86 87
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 140ea10ecb88..c02db01f736e 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -27,6 +27,7 @@
27#include <linux/cdev.h> 27#include <linux/cdev.h>
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <asm/pgtable.h>
30#include <asm/io.h> 31#include <asm/io.h>
31 32
32/* 33/*
@@ -75,12 +76,13 @@ static struct class *bsr_class;
75static int bsr_major; 76static int bsr_major;
76 77
77enum { 78enum {
78 BSR_8 = 0, 79 BSR_8 = 0,
79 BSR_16 = 1, 80 BSR_16 = 1,
80 BSR_64 = 2, 81 BSR_64 = 2,
81 BSR_128 = 3, 82 BSR_128 = 3,
82 BSR_UNKNOWN = 4, 83 BSR_4096 = 4,
83 BSR_MAX = 5, 84 BSR_UNKNOWN = 5,
85 BSR_MAX = 6,
84}; 86};
85 87
86static unsigned bsr_types[BSR_MAX]; 88static unsigned bsr_types[BSR_MAX];
@@ -117,15 +119,22 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
117{ 119{
118 unsigned long size = vma->vm_end - vma->vm_start; 120 unsigned long size = vma->vm_end - vma->vm_start;
119 struct bsr_dev *dev = filp->private_data; 121 struct bsr_dev *dev = filp->private_data;
122 int ret;
120 123
121 if (size > dev->bsr_len || (size & (PAGE_SIZE-1)))
122 return -EINVAL;
123
124 vma->vm_flags |= (VM_IO | VM_DONTEXPAND);
125 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 124 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
126 125
127 if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT, 126 /* check for the case of a small BSR device and map one 4k page for it*/
128 size, vma->vm_page_prot)) 127 if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
128 ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
129 vma->vm_page_prot);
130 else if (size <= dev->bsr_len)
131 ret = io_remap_pfn_range(vma, vma->vm_start,
132 dev->bsr_addr >> PAGE_SHIFT,
133 size, vma->vm_page_prot);
134 else
135 return -EINVAL;
136
137 if (ret)
129 return -EAGAIN; 138 return -EAGAIN;
130 139
131 return 0; 140 return 0;
@@ -205,6 +214,11 @@ static int bsr_add_node(struct device_node *bn)
205 cur->bsr_stride = bsr_stride[i]; 214 cur->bsr_stride = bsr_stride[i];
206 cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); 215 cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
207 216
 217 /* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
218 /* we can only map 4k of it, so only advertise the 4k in sysfs */
219 if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
220 cur->bsr_len = 4096;
221
208 switch(cur->bsr_bytes) { 222 switch(cur->bsr_bytes) {
209 case 8: 223 case 8:
210 cur->bsr_type = BSR_8; 224 cur->bsr_type = BSR_8;
@@ -218,9 +232,11 @@ static int bsr_add_node(struct device_node *bn)
218 case 128: 232 case 128:
219 cur->bsr_type = BSR_128; 233 cur->bsr_type = BSR_128;
220 break; 234 break;
235 case 4096:
236 cur->bsr_type = BSR_4096;
237 break;
221 default: 238 default:
222 cur->bsr_type = BSR_UNKNOWN; 239 cur->bsr_type = BSR_UNKNOWN;
223 printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes);
224 } 240 }
225 241
226 cur->bsr_num = bsr_types[cur->bsr_type]; 242 cur->bsr_num = bsr_types[cur->bsr_type];
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index f3366d3f06cf..2dafc2da0648 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -633,6 +633,7 @@
633#include <linux/tty.h> 633#include <linux/tty.h>
634#include <linux/tty_flip.h> 634#include <linux/tty_flip.h>
635#include <linux/serial.h> 635#include <linux/serial.h>
636#include <linux/smp_lock.h>
636#include <linux/major.h> 637#include <linux/major.h>
637#include <linux/string.h> 638#include <linux/string.h>
638#include <linux/fcntl.h> 639#include <linux/fcntl.h>
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index abef1f7d84fe..ff647ca1c489 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -36,6 +36,7 @@
36#include <linux/tty.h> 36#include <linux/tty.h>
37#include <linux/tty_flip.h> 37#include <linux/tty_flip.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/smp_lock.h>
39#include <linux/ioport.h> 40#include <linux/ioport.h>
40#include <linux/interrupt.h> 41#include <linux/interrupt.h>
41#include <linux/uaccess.h> 42#include <linux/uaccess.h>
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 94e7e3c8c05a..d97779ef72cb 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -552,7 +552,7 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
552 struct hvc_struct *hp = tty->driver_data; 552 struct hvc_struct *hp = tty->driver_data;
553 553
554 if (!hp) 554 if (!hp)
555 return -1; 555 return 0;
556 return hp->n_outbuf; 556 return hp->n_outbuf;
557} 557}
558 558
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 5dcbe603eca2..91b53eb1c053 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -305,10 +305,11 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
305 (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)) 305 (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
306 == BIOS_CNTL_LOCK_ENABLE_MASK) { 306 == BIOS_CNTL_LOCK_ENABLE_MASK) {
307 static __initdata /*const*/ char warning[] = 307 static __initdata /*const*/ char warning[] =
308 KERN_WARNING PFX "Firmware space is locked read-only. If you can't or\n" 308 KERN_WARNING
309 KERN_WARNING PFX "don't want to disable this in firmware setup, and if\n" 309PFX "Firmware space is locked read-only. If you can't or\n"
310 KERN_WARNING PFX "you are certain that your system has a functional\n" 310PFX "don't want to disable this in firmware setup, and if\n"
311 KERN_WARNING PFX "RNG, try using the 'no_fwh_detect' option.\n"; 311PFX "you are certain that your system has a functional\n"
312PFX "RNG, try using the 'no_fwh_detect' option.\n";
312 313
313 if (no_fwh_detect) 314 if (no_fwh_detect)
314 return -ENODEV; 315 return -ENODEV;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 4159292e35cf..4f1f4cd670da 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -122,6 +122,7 @@
122#include <linux/fs.h> 122#include <linux/fs.h>
123#include <linux/sched.h> 123#include <linux/sched.h>
124#include <linux/serial.h> 124#include <linux/serial.h>
125#include <linux/smp_lock.h>
125#include <linux/mm.h> 126#include <linux/mm.h>
126#include <linux/interrupt.h> 127#include <linux/interrupt.h>
127#include <linux/timer.h> 128#include <linux/timer.h>
@@ -1478,10 +1479,10 @@ static int __devinit load_firmware(struct pci_dev *pdev,
1478 status = inw(base + 0x4); 1479 status = inw(base + 0x4);
1479 if (status != 0) { 1480 if (status != 0) {
1480 dev_warn(&pdev->dev, "Card%d rejected load header:\n" 1481 dev_warn(&pdev->dev, "Card%d rejected load header:\n"
1481 KERN_WARNING "Address:0x%x\n" 1482 "Address:0x%x\n"
1482 KERN_WARNING "Count:0x%x\n" 1483 "Count:0x%x\n"
1483 KERN_WARNING "Status:0x%x\n", 1484 "Status:0x%x\n",
1484 index + 1, frame->addr, frame->count, status); 1485 index + 1, frame->addr, frame->count, status);
1485 goto errrelfw; 1486 goto errrelfw;
1486 } 1487 }
1487 outsw(base, frame->data, word_count); 1488 outsw(base, frame->data, word_count);
@@ -1526,10 +1527,10 @@ static int __devinit load_firmware(struct pci_dev *pdev,
1526 status = inw(base + 0x4); 1527 status = inw(base + 0x4);
1527 if (status != 0) { 1528 if (status != 0) {
1528 dev_warn(&pdev->dev, "Card%d rejected verify header:\n" 1529 dev_warn(&pdev->dev, "Card%d rejected verify header:\n"
1529 KERN_WARNING "Address:0x%x\n" 1530 "Address:0x%x\n"
1530 KERN_WARNING "Count:0x%x\n" 1531 "Count:0x%x\n"
1531 KERN_WARNING "Status: 0x%x\n", 1532 "Status: 0x%x\n",
1532 index + 1, frame->addr, frame->count, status); 1533 index + 1, frame->addr, frame->count, status);
1533 goto errrelfw; 1534 goto errrelfw;
1534 } 1535 }
1535 1536
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 0c999f5bb3db..ab2f3349c5c4 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/smp_lock.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/tty.h> 25#include <linux/tty.h>
25#include <linux/tty_flip.h> 26#include <linux/tty_flip.h>
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 65b6ff2442c6..dd0083bbb64a 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -34,6 +34,7 @@
34#include <linux/tty.h> 34#include <linux/tty.h>
35#include <linux/tty_flip.h> 35#include <linux/tty_flip.h>
36#include <linux/major.h> 36#include <linux/major.h>
37#include <linux/smp_lock.h>
37#include <linux/string.h> 38#include <linux/string.h>
38#include <linux/fcntl.h> 39#include <linux/fcntl.h>
39#include <linux/ptrace.h> 40#include <linux/ptrace.h>
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 52d953eb30c3..dbf8d52f31d0 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -23,6 +23,7 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/signal.h> 24#include <linux/signal.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/smp_lock.h>
26#include <linux/timer.h> 27#include <linux/timer.h>
27#include <linux/interrupt.h> 28#include <linux/interrupt.h>
28#include <linux/tty.h> 29#include <linux/tty.h>
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 1c43c8cdee25..c68118efad84 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -97,6 +97,7 @@
97#include <linux/slab.h> 97#include <linux/slab.h>
98#include <linux/tty.h> 98#include <linux/tty.h>
99#include <linux/errno.h> 99#include <linux/errno.h>
100#include <linux/smp_lock.h>
100#include <linux/string.h> /* used in new tty drivers */ 101#include <linux/string.h> /* used in new tty drivers */
101#include <linux/signal.h> /* used in new tty drivers */ 102#include <linux/signal.h> /* used in new tty drivers */
102#include <linux/if.h> 103#include <linux/if.h>
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 2e99158ebb8a..6934025a1ac1 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -58,6 +58,7 @@
58#include <linux/ioport.h> 58#include <linux/ioport.h>
59#include <linux/in.h> 59#include <linux/in.h>
60#include <linux/slab.h> 60#include <linux/slab.h>
61#include <linux/smp_lock.h>
61#include <linux/tty.h> 62#include <linux/tty.h>
62#include <linux/errno.h> 63#include <linux/errno.h>
63#include <linux/string.h> /* used in new tty drivers */ 64#include <linux/string.h> /* used in new tty drivers */
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 94a5d5020abc..973be2f44195 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1331,9 +1331,6 @@ handle_newline:
1331 1331
1332static void n_tty_write_wakeup(struct tty_struct *tty) 1332static void n_tty_write_wakeup(struct tty_struct *tty)
1333{ 1333{
1334 /* Write out any echoed characters that are still pending */
1335 process_echoes(tty);
1336
1337 if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) 1334 if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
1338 kill_fasync(&tty->fasync, SIGIO, POLL_OUT); 1335 kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
1339} 1336}
@@ -1586,6 +1583,7 @@ static int n_tty_open(struct tty_struct *tty)
1586 1583
1587static inline int input_available_p(struct tty_struct *tty, int amt) 1584static inline int input_available_p(struct tty_struct *tty, int amt)
1588{ 1585{
1586 tty_flush_to_ldisc(tty);
1589 if (tty->icanon) { 1587 if (tty->icanon) {
1590 if (tty->canon_data) 1588 if (tty->canon_data)
1591 return 1; 1589 return 1;
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 574f1c79b6e6..ec58d8c387ff 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -828,7 +828,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
828 struct port *port = &dc->port[index]; 828 struct port *port = &dc->port[index];
829 void __iomem *addr = port->dl_addr[port->toggle_dl]; 829 void __iomem *addr = port->dl_addr[port->toggle_dl];
830 struct tty_struct *tty = tty_port_tty_get(&port->port); 830 struct tty_struct *tty = tty_port_tty_get(&port->port);
831 int i; 831 int i, ret;
832 832
833 if (unlikely(!tty)) { 833 if (unlikely(!tty)) {
834 DBG1("tty not open for port: %d?", index); 834 DBG1("tty not open for port: %d?", index);
@@ -844,12 +844,14 @@ static int receive_data(enum port_type index, struct nozomi *dc)
844 844
845 /* disable interrupt in downlink... */ 845 /* disable interrupt in downlink... */
846 disable_transmit_dl(index, dc); 846 disable_transmit_dl(index, dc);
847 return 0; 847 ret = 0;
848 goto put;
848 } 849 }
849 850
850 if (unlikely(size == 0)) { 851 if (unlikely(size == 0)) {
851 dev_err(&dc->pdev->dev, "size == 0?\n"); 852 dev_err(&dc->pdev->dev, "size == 0?\n");
852 return 1; 853 ret = 1;
854 goto put;
853 } 855 }
854 856
855 tty_buffer_request_room(tty, size); 857 tty_buffer_request_room(tty, size);
@@ -871,8 +873,10 @@ static int receive_data(enum port_type index, struct nozomi *dc)
871 } 873 }
872 874
873 set_bit(index, &dc->flip); 875 set_bit(index, &dc->flip);
876 ret = 1;
877put:
874 tty_kref_put(tty); 878 tty_kref_put(tty);
875 return 1; 879 return ret;
876} 880}
877 881
878/* Debug for interrupts */ 882/* Debug for interrupts */
@@ -1862,16 +1866,14 @@ static s32 ntty_chars_in_buffer(struct tty_struct *tty)
1862{ 1866{
1863 struct port *port = tty->driver_data; 1867 struct port *port = tty->driver_data;
1864 struct nozomi *dc = get_dc_by_tty(tty); 1868 struct nozomi *dc = get_dc_by_tty(tty);
1865 s32 rval; 1869 s32 rval = 0;
1866 1870
1867 if (unlikely(!dc || !port)) { 1871 if (unlikely(!dc || !port)) {
1868 rval = -ENODEV;
1869 goto exit_in_buffer; 1872 goto exit_in_buffer;
1870 } 1873 }
1871 1874
1872 if (unlikely(!port->port.count)) { 1875 if (unlikely(!port->port.count)) {
1873 dev_err(&dc->pdev->dev, "No tty open?\n"); 1876 dev_err(&dc->pdev->dev, "No tty open?\n");
1874 rval = -ENODEV;
1875 goto exit_in_buffer; 1877 goto exit_in_buffer;
1876 } 1878 }
1877 1879
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index 569f2f7743a7..674b3ab3587d 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -320,10 +320,10 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
320 struct ipw_tty *tty = linux_tty->driver_data; 320 struct ipw_tty *tty = linux_tty->driver_data;
321 321
322 if (!tty) 322 if (!tty)
323 return -ENODEV; 323 return 0;
324 324
325 if (!tty->open_count) 325 if (!tty->open_count)
326 return -EINVAL; 326 return 0;
327 327
328 return tty->tx_bytes_queued; 328 return tty->tx_bytes_queued;
329} 329}
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index daebe1ba43d4..6e6942c45f5b 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -22,6 +22,7 @@
22#include <linux/major.h> 22#include <linux/major.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/smp_lock.h>
25#include <linux/sysctl.h> 26#include <linux/sysctl.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/uaccess.h> 28#include <linux/uaccess.h>
@@ -75,114 +76,88 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
75 */ 76 */
76static void pty_unthrottle(struct tty_struct *tty) 77static void pty_unthrottle(struct tty_struct *tty)
77{ 78{
78 struct tty_struct *o_tty = tty->link; 79 tty_wakeup(tty->link);
79
80 if (!o_tty)
81 return;
82
83 tty_wakeup(o_tty);
84 set_bit(TTY_THROTTLED, &tty->flags); 80 set_bit(TTY_THROTTLED, &tty->flags);
85} 81}
86 82
87/* 83/**
88 * WSH 05/24/97: modified to 84 * pty_space - report space left for writing
89 * (1) use space in tty->flip instead of a shared temp buffer 85 * @to: tty we are writing into
90 * The flip buffers aren't being used for a pty, so there's lots
91 * of space available. The buffer is protected by a per-pty
92 * semaphore that should almost never come under contention.
93 * (2) avoid redundant copying for cases where count >> receive_room
94 * N.B. Calls from user space may now return an error code instead of
95 * a count.
96 * 86 *
 97 * FIXME: Our pty_write method is called with our ldisc lock held but 87 * The tty buffers allow 64K but we sneak a peek and clip at 8K; this
98 * not our partners. We can't just wait on the other one blindly without 88 * allows a lot of overspill room for echo and other fun messes to
99 * risking deadlocks. At some point when everything has settled down we need 89 * be handled properly
100 * to look into making pty_write at least able to sleep over an ldisc change. 90 */
91
92static int pty_space(struct tty_struct *to)
93{
94 int n = 8192 - to->buf.memory_used;
95 if (n < 0)
96 return 0;
97 return n;
98}
99
100/**
101 * pty_write - write to a pty
102 * @tty: the tty we write from
103 * @buf: kernel buffer of data
104 * @count: bytes to write
101 * 105 *
102 * The return on no ldisc is a bit counter intuitive but the logic works 106 * Our "hardware" write method. Data is coming from the ldisc which
103 * like this. During an ldisc change the other end will flush its buffers. We 107 * may be in a non sleeping state. We simply throw this at the other
104 * thus return the full length which is identical to the case where we had 108 * end of the link as if we were an IRQ handler receiving stuff for
105 * proper locking and happened to queue the bytes just before the flush during 109 * the other side of the pty/tty pair.
106 * the ldisc change.
107 */ 110 */
111
108static int pty_write(struct tty_struct *tty, const unsigned char *buf, 112static int pty_write(struct tty_struct *tty, const unsigned char *buf,
109 int count) 113 int count)
110{ 114{
111 struct tty_struct *to = tty->link; 115 struct tty_struct *to = tty->link;
112 struct tty_ldisc *ld; 116 int c;
113 int c = count;
114 117
115 if (!to || tty->stopped) 118 if (tty->stopped)
116 return 0; 119 return 0;
117 ld = tty_ldisc_ref(to); 120
118 121 /* This isn't locked but our 8K is quite sloppy so no
119 if (ld) { 122 big deal */
120 c = to->receive_room; 123
121 if (c > count) 124 c = pty_space(to);
122 c = count; 125 if (c > count)
123 ld->ops->receive_buf(to, buf, NULL, c); 126 c = count;
124 tty_ldisc_deref(ld); 127 if (c > 0) {
128 /* Stuff the data into the input queue of the other end */
129 c = tty_insert_flip_string(to, buf, c);
130 /* And shovel */
131 tty_flip_buffer_push(to);
132 tty_wakeup(tty);
125 } 133 }
126 return c; 134 return c;
127} 135}
128 136
137/**
138 * pty_write_room - write space
139 * @tty: tty we are writing from
140 *
141 * Report how many bytes the ldisc can send into the queue for
142 * the other device.
143 */
144
129static int pty_write_room(struct tty_struct *tty) 145static int pty_write_room(struct tty_struct *tty)
130{ 146{
131 struct tty_struct *to = tty->link; 147 return pty_space(tty->link);
132
133 if (!to || tty->stopped)
134 return 0;
135
136 return to->receive_room;
137} 148}
138 149
139/* 150/**
140 * WSH 05/24/97: Modified for asymmetric MASTER/SLAVE behavior 151 * pty_chars_in_buffer - characters currently in our tx queue
141 * The chars_in_buffer() value is used by the ldisc select() function 152 * @tty: our tty
142 * to hold off writing when chars_in_buffer > WAKEUP_CHARS (== 256).
143 * The pty driver chars_in_buffer() Master/Slave must behave differently:
144 *
145 * The Master side needs to allow typed-ahead commands to accumulate
146 * while being canonicalized, so we report "our buffer" as empty until
147 * some threshold is reached, and then report the count. (Any count >
148 * WAKEUP_CHARS is regarded by select() as "full".) To avoid deadlock
149 * the count returned must be 0 if no canonical data is available to be
150 * read. (The N_TTY ldisc.chars_in_buffer now knows this.)
151 * 153 *
152 * The Slave side passes all characters in raw mode to the Master side's 154 * Report how much we have in the transmit queue. As everything is
153 * buffer where they can be read immediately, so in this case we can 155 * instantly at the other end this is easy to implement.
154 * return the true count in the buffer.
155 */ 156 */
157
156static int pty_chars_in_buffer(struct tty_struct *tty) 158static int pty_chars_in_buffer(struct tty_struct *tty)
157{ 159{
158 struct tty_struct *to = tty->link; 160 return 0;
159 struct tty_ldisc *ld;
160 int count = 0;
161
162 /* We should get the line discipline lock for "tty->link" */
163 if (!to)
164 return 0;
165 /* We cannot take a sleeping reference here without deadlocking with
166 an ldisc change - but it doesn't really matter */
167 ld = tty_ldisc_ref(to);
168 if (ld == NULL)
169 return 0;
170
171 /* The ldisc must report 0 if no characters available to be read */
172 if (ld->ops->chars_in_buffer)
173 count = ld->ops->chars_in_buffer(to);
174
175 tty_ldisc_deref(ld);
176
177 if (tty->driver->subtype == PTY_TYPE_SLAVE)
178 return count;
179
180 /* Master side driver ... if the other side's read buffer is less than
181 * half full, return 0 to allow writers to proceed; otherwise return
182 * the count. This leaves a comfortable margin to avoid overflow,
183 * and still allows half a buffer's worth of typed-ahead commands.
184 */
185 return (count < N_TTY_BUF_SIZE/2) ? 0 : count;
186} 161}
187 162
188/* Set the lock flag on a pty */ 163/* Set the lock flag on a pty */
@@ -202,20 +177,10 @@ static void pty_flush_buffer(struct tty_struct *tty)
202{ 177{
203 struct tty_struct *to = tty->link; 178 struct tty_struct *to = tty->link;
204 unsigned long flags; 179 unsigned long flags;
205 struct tty_ldisc *ld;
206 180
207 if (!to) 181 if (!to)
208 return; 182 return;
209 ld = tty_ldisc_ref(to); 183 /* tty_buffer_flush(to); FIXME */
210
211 /* The other end is changing discipline */
212 if (!ld)
213 return;
214
215 if (ld->ops->flush_buffer)
216 to->ldisc->ops->flush_buffer(to);
217 tty_ldisc_deref(ld);
218
219 if (to->packet) { 184 if (to->packet) {
220 spin_lock_irqsave(&tty->ctrl_lock, flags); 185 spin_lock_irqsave(&tty->ctrl_lock, flags);
221 tty->ctrl_status |= TIOCPKT_FLUSHWRITE; 186 tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index ce81da5b2da9..d58c2eb07f07 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -44,6 +44,7 @@
44#include <linux/delay.h> 44#include <linux/delay.h>
45#include <linux/pci.h> 45#include <linux/pci.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/smp_lock.h>
47#include <linux/miscdevice.h> 48#include <linux/miscdevice.h>
48#include <linux/init.h> 49#include <linux/init.h>
49 50
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 217660451237..171711acf5cd 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -47,6 +47,7 @@
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/delay.h> 48#include <linux/delay.h>
49#include <linux/tty_flip.h> 49#include <linux/tty_flip.h>
50#include <linux/smp_lock.h>
50#include <linux/spinlock.h> 51#include <linux/spinlock.h>
51#include <linux/device.h> 52#include <linux/device.h>
52 53
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 63d5b628477a..0e29a23ec4c5 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -73,6 +73,7 @@
73#include <linux/tty_driver.h> 73#include <linux/tty_driver.h>
74#include <linux/tty_flip.h> 74#include <linux/tty_flip.h>
75#include <linux/serial.h> 75#include <linux/serial.h>
76#include <linux/smp_lock.h>
76#include <linux/string.h> 77#include <linux/string.h>
77#include <linux/fcntl.h> 78#include <linux/fcntl.h>
78#include <linux/ptrace.h> 79#include <linux/ptrace.h>
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index f1f24f0ee26f..51e7a46787be 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -52,6 +52,7 @@
52#include <linux/interrupt.h> 52#include <linux/interrupt.h>
53#include <linux/serial.h> 53#include <linux/serial.h>
54#include <linux/serialP.h> 54#include <linux/serialP.h>
55#include <linux/smp_lock.h>
55#include <linux/string.h> 56#include <linux/string.h>
56#include <linux/fcntl.h> 57#include <linux/fcntl.h>
57#include <linux/ptrace.h> 58#include <linux/ptrace.h>
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index e72be4190a44..268e17f9ec3f 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -87,6 +87,7 @@
87#include <linux/tty_flip.h> 87#include <linux/tty_flip.h>
88#include <linux/mm.h> 88#include <linux/mm.h>
89#include <linux/serial.h> 89#include <linux/serial.h>
90#include <linux/smp_lock.h>
90#include <linux/fcntl.h> 91#include <linux/fcntl.h>
91#include <linux/major.h> 92#include <linux/major.h>
92#include <linux/delay.h> 93#include <linux/delay.h>
@@ -1808,10 +1809,10 @@ static int sx_tiocmset(struct tty_struct *tty, struct file *file,
1808 if (clear & TIOCM_DTR) 1809 if (clear & TIOCM_DTR)
1809 port->MSVR &= ~MSVR_DTR; 1810 port->MSVR &= ~MSVR_DTR;
1810 } 1811 }
1811 spin_lock_irqsave(&bp->lock, flags); 1812 spin_lock(&bp->lock);
1812 sx_out(bp, CD186x_CAR, port_No(port)); 1813 sx_out(bp, CD186x_CAR, port_No(port));
1813 sx_out(bp, CD186x_MSVR, port->MSVR); 1814 sx_out(bp, CD186x_MSVR, port->MSVR);
1814 spin_unlock_irqrestore(&bp->lock, flags); 1815 spin_unlock(&bp->lock);
1815 spin_unlock_irqrestore(&port->lock, flags); 1816 spin_unlock_irqrestore(&port->lock, flags);
1816 func_exit(); 1817 func_exit();
1817 return 0; 1818 return 0;
@@ -1832,11 +1833,11 @@ static int sx_send_break(struct tty_struct *tty, int length)
1832 port->break_length = SPECIALIX_TPS / HZ * length; 1833 port->break_length = SPECIALIX_TPS / HZ * length;
1833 port->COR2 |= COR2_ETC; 1834 port->COR2 |= COR2_ETC;
1834 port->IER |= IER_TXRDY; 1835 port->IER |= IER_TXRDY;
1835 spin_lock_irqsave(&bp->lock, flags); 1836 spin_lock(&bp->lock);
1836 sx_out(bp, CD186x_CAR, port_No(port)); 1837 sx_out(bp, CD186x_CAR, port_No(port));
1837 sx_out(bp, CD186x_COR2, port->COR2); 1838 sx_out(bp, CD186x_COR2, port->COR2);
1838 sx_out(bp, CD186x_IER, port->IER); 1839 sx_out(bp, CD186x_IER, port->IER);
1839 spin_unlock_irqrestore(&bp->lock, flags); 1840 spin_unlock(&bp->lock);
1840 spin_unlock_irqrestore(&port->lock, flags); 1841 spin_unlock_irqrestore(&port->lock, flags);
1841 sx_wait_CCR(bp); 1842 sx_wait_CCR(bp);
1842 spin_lock_irqsave(&bp->lock, flags); 1843 spin_lock_irqsave(&bp->lock, flags);
@@ -2022,9 +2023,9 @@ static void sx_unthrottle(struct tty_struct *tty)
2022 if (sx_crtscts(tty)) 2023 if (sx_crtscts(tty))
2023 port->MSVR |= MSVR_DTR; 2024 port->MSVR |= MSVR_DTR;
2024 /* Else clause: see remark in "sx_throttle"... */ 2025 /* Else clause: see remark in "sx_throttle"... */
2025 spin_lock_irqsave(&bp->lock, flags); 2026 spin_lock(&bp->lock);
2026 sx_out(bp, CD186x_CAR, port_No(port)); 2027 sx_out(bp, CD186x_CAR, port_No(port));
2027 spin_unlock_irqrestore(&bp->lock, flags); 2028 spin_unlock(&bp->lock);
2028 if (I_IXOFF(tty)) { 2029 if (I_IXOFF(tty)) {
2029 spin_unlock_irqrestore(&port->lock, flags); 2030 spin_unlock_irqrestore(&port->lock, flags);
2030 sx_wait_CCR(bp); 2031 sx_wait_CCR(bp);
@@ -2034,9 +2035,9 @@ static void sx_unthrottle(struct tty_struct *tty)
2034 sx_wait_CCR(bp); 2035 sx_wait_CCR(bp);
2035 spin_lock_irqsave(&port->lock, flags); 2036 spin_lock_irqsave(&port->lock, flags);
2036 } 2037 }
2037 spin_lock_irqsave(&bp->lock, flags); 2038 spin_lock(&bp->lock);
2038 sx_out(bp, CD186x_MSVR, port->MSVR); 2039 sx_out(bp, CD186x_MSVR, port->MSVR);
2039 spin_unlock_irqrestore(&bp->lock, flags); 2040 spin_unlock(&bp->lock);
2040 spin_unlock_irqrestore(&port->lock, flags); 2041 spin_unlock_irqrestore(&port->lock, flags);
2041 2042
2042 func_exit(); 2043 func_exit();
@@ -2060,10 +2061,10 @@ static void sx_stop(struct tty_struct *tty)
2060 2061
2061 spin_lock_irqsave(&port->lock, flags); 2062 spin_lock_irqsave(&port->lock, flags);
2062 port->IER &= ~IER_TXRDY; 2063 port->IER &= ~IER_TXRDY;
2063 spin_lock_irqsave(&bp->lock, flags); 2064 spin_lock(&bp->lock);
2064 sx_out(bp, CD186x_CAR, port_No(port)); 2065 sx_out(bp, CD186x_CAR, port_No(port));
2065 sx_out(bp, CD186x_IER, port->IER); 2066 sx_out(bp, CD186x_IER, port->IER);
2066 spin_unlock_irqrestore(&bp->lock, flags); 2067 spin_unlock(&bp->lock);
2067 spin_unlock_irqrestore(&port->lock, flags); 2068 spin_unlock_irqrestore(&port->lock, flags);
2068 2069
2069 func_exit(); 2070 func_exit();
@@ -2088,10 +2089,10 @@ static void sx_start(struct tty_struct *tty)
2088 spin_lock_irqsave(&port->lock, flags); 2089 spin_lock_irqsave(&port->lock, flags);
2089 if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { 2090 if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) {
2090 port->IER |= IER_TXRDY; 2091 port->IER |= IER_TXRDY;
2091 spin_lock_irqsave(&bp->lock, flags); 2092 spin_lock(&bp->lock);
2092 sx_out(bp, CD186x_CAR, port_No(port)); 2093 sx_out(bp, CD186x_CAR, port_No(port));
2093 sx_out(bp, CD186x_IER, port->IER); 2094 sx_out(bp, CD186x_IER, port->IER);
2094 spin_unlock_irqrestore(&bp->lock, flags); 2095 spin_unlock(&bp->lock);
2095 } 2096 }
2096 spin_unlock_irqrestore(&port->lock, flags); 2097 spin_unlock_irqrestore(&port->lock, flags);
2097 2098
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 518f2a25d91e..a81ec4fcf6ff 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -216,6 +216,7 @@
216#include <linux/eisa.h> 216#include <linux/eisa.h>
217#include <linux/pci.h> 217#include <linux/pci.h>
218#include <linux/slab.h> 218#include <linux/slab.h>
219#include <linux/smp_lock.h>
219#include <linux/init.h> 220#include <linux/init.h>
220#include <linux/miscdevice.h> 221#include <linux/miscdevice.h>
221#include <linux/bitops.h> 222#include <linux/bitops.h>
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index afded3a2379c..813552f14884 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -81,6 +81,7 @@
81#include <linux/mm.h> 81#include <linux/mm.h>
82#include <linux/seq_file.h> 82#include <linux/seq_file.h>
83#include <linux/slab.h> 83#include <linux/slab.h>
84#include <linux/smp_lock.h>
84#include <linux/delay.h> 85#include <linux/delay.h>
85#include <linux/netdevice.h> 86#include <linux/netdevice.h>
86#include <linux/vmalloc.h> 87#include <linux/vmalloc.h>
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index a2e67e6df3a1..91f20a92fddf 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -62,6 +62,7 @@
62#include <linux/mm.h> 62#include <linux/mm.h>
63#include <linux/seq_file.h> 63#include <linux/seq_file.h>
64#include <linux/slab.h> 64#include <linux/slab.h>
65#include <linux/smp_lock.h>
65#include <linux/netdevice.h> 66#include <linux/netdevice.h>
66#include <linux/vmalloc.h> 67#include <linux/vmalloc.h>
67#include <linux/init.h> 68#include <linux/init.h>
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 6f727e3c53ad..8d4a2a8a0a70 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -52,6 +52,7 @@
52#include <linux/mm.h> 52#include <linux/mm.h>
53#include <linux/seq_file.h> 53#include <linux/seq_file.h>
54#include <linux/slab.h> 54#include <linux/slab.h>
55#include <linux/smp_lock.h>
55#include <linux/netdevice.h> 56#include <linux/netdevice.h>
56#include <linux/vmalloc.h> 57#include <linux/vmalloc.h>
57#include <linux/init.h> 58#include <linux/init.h>
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 0db35857e4d8..5d7a02f63e1c 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -35,7 +35,6 @@
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/vt_kern.h> 36#include <linux/vt_kern.h>
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/kexec.h>
39#include <linux/hrtimer.h> 38#include <linux/hrtimer.h>
40#include <linux/oom.h> 39#include <linux/oom.h>
41 40
@@ -124,9 +123,12 @@ static struct sysrq_key_op sysrq_unraw_op = {
124static void sysrq_handle_crash(int key, struct tty_struct *tty) 123static void sysrq_handle_crash(int key, struct tty_struct *tty)
125{ 124{
126 char *killer = NULL; 125 char *killer = NULL;
126
127 panic_on_oops = 1; /* force panic */
128 wmb();
127 *killer = 1; 129 *killer = 1;
128} 130}
129static struct sysrq_key_op sysrq_crashdump_op = { 131static struct sysrq_key_op sysrq_crash_op = {
130 .handler = sysrq_handle_crash, 132 .handler = sysrq_handle_crash,
131 .help_msg = "Crash", 133 .help_msg = "Crash",
132 .action_msg = "Trigger a crash", 134 .action_msg = "Trigger a crash",
@@ -401,7 +403,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
401 */ 403 */
402 NULL, /* a */ 404 NULL, /* a */
403 &sysrq_reboot_op, /* b */ 405 &sysrq_reboot_op, /* b */
404 &sysrq_crashdump_op, /* c & ibm_emac driver debug */ 406 &sysrq_crash_op, /* c & ibm_emac driver debug */
405 &sysrq_showlocks_op, /* d */ 407 &sysrq_showlocks_op, /* d */
406 &sysrq_term_op, /* e */ 408 &sysrq_term_op, /* e */
407 &sysrq_moom_op, /* f */ 409 &sysrq_moom_op, /* f */
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 6062b62800fd..b3ec9b10e292 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for TANBAC TB0219 base board. 2 * Driver for TANBAC TB0219 base board.
3 * 3 *
4 * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
28#include <asm/vr41xx/giu.h> 28#include <asm/vr41xx/giu.h>
29#include <asm/vr41xx/tb0219.h> 29#include <asm/vr41xx/tb0219.h>
30 30
31MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 31MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
32MODULE_DESCRIPTION("TANBAC TB0219 base board driver"); 32MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
33MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
34 34
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index ccdd828adcef..b0603b2e5684 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -26,7 +26,6 @@
26#include <linux/poll.h> 26#include <linux/poll.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/smp_lock.h>
30 29
31#include "tpm.h" 30#include "tpm.h"
32 31
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 810ee25d66a4..3108991c5c8b 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -462,6 +462,19 @@ static void flush_to_ldisc(struct work_struct *work)
462} 462}
463 463
464/** 464/**
465 * tty_flush_to_ldisc
466 * @tty: tty to push
467 *
468 * Push the terminal flip buffers to the line discipline.
469 *
470 * Must not be called from IRQ context.
471 */
472void tty_flush_to_ldisc(struct tty_struct *tty)
473{
474 flush_to_ldisc(&tty->buf.work.work);
475}
476
477/**
465 * tty_flip_buffer_push - terminal 478 * tty_flip_buffer_push - terminal
466 * @tty: tty to push 479 * @tty: tty to push
467 * 480 *
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index b24f6c6a1ea3..ad6ba4ed2808 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -21,7 +21,6 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/smp_lock.h>
25 24
26#include <asm/io.h> 25#include <asm/io.h>
27#include <asm/uaccess.h> 26#include <asm/uaccess.h>
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index a19e935847b0..1733d3439ad2 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -21,7 +21,6 @@
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/smp_lock.h>
25#include <linux/device.h> 24#include <linux/device.h>
26#include <linux/wait.h> 25#include <linux/wait.h>
27#include <linux/bitops.h> 26#include <linux/bitops.h>
@@ -49,6 +48,41 @@ static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
49/* Line disc dispatch table */ 48/* Line disc dispatch table */
50static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; 49static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
51 50
51static inline struct tty_ldisc *get_ldisc(struct tty_ldisc *ld)
52{
53 if (ld)
54 atomic_inc(&ld->users);
55 return ld;
56}
57
58static void put_ldisc(struct tty_ldisc *ld)
59{
60 unsigned long flags;
61
62 if (WARN_ON_ONCE(!ld))
63 return;
64
65 /*
66 * If this is the last user, free the ldisc, and
67 * release the ldisc ops.
68 *
69 * We really want an "atomic_dec_and_lock_irqsave()",
70 * but we don't have it, so this does it by hand.
71 */
72 local_irq_save(flags);
73 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
74 struct tty_ldisc_ops *ldo = ld->ops;
75
76 ldo->refcount--;
77 module_put(ldo->owner);
78 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
79
80 kfree(ld);
81 return;
82 }
83 local_irq_restore(flags);
84}
85
52/** 86/**
53 * tty_register_ldisc - install a line discipline 87 * tty_register_ldisc - install a line discipline
54 * @disc: ldisc number 88 * @disc: ldisc number
@@ -143,7 +177,7 @@ static struct tty_ldisc *tty_ldisc_try_get(int disc)
143 /* lock it */ 177 /* lock it */
144 ldops->refcount++; 178 ldops->refcount++;
145 ld->ops = ldops; 179 ld->ops = ldops;
146 ld->refcount = 0; 180 atomic_set(&ld->users, 1);
147 err = 0; 181 err = 0;
148 } 182 }
149 } 183 }
@@ -182,35 +216,6 @@ static struct tty_ldisc *tty_ldisc_get(int disc)
182 return ld; 216 return ld;
183} 217}
184 218
185/**
186 * tty_ldisc_put - drop ldisc reference
187 * @ld: ldisc
188 *
189 * Drop a reference to a line discipline. Manage refcounts and
 190 * module usage counts. Free the ldisc once the refcount hits zero.
191 *
192 * Locking:
193 * takes tty_ldisc_lock to guard against ldisc races
194 */
195
196static void tty_ldisc_put(struct tty_ldisc *ld)
197{
198 unsigned long flags;
199 int disc = ld->ops->num;
200 struct tty_ldisc_ops *ldo;
201
202 BUG_ON(disc < N_TTY || disc >= NR_LDISCS);
203
204 spin_lock_irqsave(&tty_ldisc_lock, flags);
205 ldo = tty_ldiscs[disc];
206 BUG_ON(ldo->refcount == 0);
207 ldo->refcount--;
208 module_put(ldo->owner);
209 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
210 WARN_ON(ld->refcount);
211 kfree(ld);
212}
213
214static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos) 219static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
215{ 220{
216 return (*pos < NR_LDISCS) ? pos : NULL; 221 return (*pos < NR_LDISCS) ? pos : NULL;
@@ -235,7 +240,7 @@ static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
235 if (IS_ERR(ld)) 240 if (IS_ERR(ld))
236 return 0; 241 return 0;
237 seq_printf(m, "%-10s %2d\n", ld->ops->name ? ld->ops->name : "???", i); 242 seq_printf(m, "%-10s %2d\n", ld->ops->name ? ld->ops->name : "???", i);
238 tty_ldisc_put(ld); 243 put_ldisc(ld);
239 return 0; 244 return 0;
240} 245}
241 246
@@ -289,20 +294,17 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
289 * Locking: takes tty_ldisc_lock 294 * Locking: takes tty_ldisc_lock
290 */ 295 */
291 296
292static int tty_ldisc_try(struct tty_struct *tty) 297static struct tty_ldisc *tty_ldisc_try(struct tty_struct *tty)
293{ 298{
294 unsigned long flags; 299 unsigned long flags;
295 struct tty_ldisc *ld; 300 struct tty_ldisc *ld;
296 int ret = 0;
297 301
298 spin_lock_irqsave(&tty_ldisc_lock, flags); 302 spin_lock_irqsave(&tty_ldisc_lock, flags);
299 ld = tty->ldisc; 303 ld = NULL;
300 if (test_bit(TTY_LDISC, &tty->flags)) { 304 if (test_bit(TTY_LDISC, &tty->flags))
301 ld->refcount++; 305 ld = get_ldisc(tty->ldisc);
302 ret = 1;
303 }
304 spin_unlock_irqrestore(&tty_ldisc_lock, flags); 306 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
305 return ret; 307 return ld;
306} 308}
307 309
308/** 310/**
@@ -323,10 +325,11 @@ static int tty_ldisc_try(struct tty_struct *tty)
323 325
324struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) 326struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
325{ 327{
328 struct tty_ldisc *ld;
329
326 /* wait_event is a macro */ 330 /* wait_event is a macro */
327 wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); 331 wait_event(tty_ldisc_wait, (ld = tty_ldisc_try(tty)) != NULL);
328 WARN_ON(tty->ldisc->refcount == 0); 332 return ld;
329 return tty->ldisc;
330} 333}
331EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); 334EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
332 335
@@ -343,9 +346,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
343 346
344struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) 347struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
345{ 348{
346 if (tty_ldisc_try(tty)) 349 return tty_ldisc_try(tty);
347 return tty->ldisc;
348 return NULL;
349} 350}
350EXPORT_SYMBOL_GPL(tty_ldisc_ref); 351EXPORT_SYMBOL_GPL(tty_ldisc_ref);
351 352
@@ -361,21 +362,15 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref);
361 362
362void tty_ldisc_deref(struct tty_ldisc *ld) 363void tty_ldisc_deref(struct tty_ldisc *ld)
363{ 364{
364 unsigned long flags; 365 put_ldisc(ld);
365
366 BUG_ON(ld == NULL);
367
368 spin_lock_irqsave(&tty_ldisc_lock, flags);
369 if (ld->refcount == 0)
370 printk(KERN_ERR "tty_ldisc_deref: no references.\n");
371 else
372 ld->refcount--;
373 if (ld->refcount == 0)
374 wake_up(&tty_ldisc_wait);
375 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
376} 366}
377EXPORT_SYMBOL_GPL(tty_ldisc_deref); 367EXPORT_SYMBOL_GPL(tty_ldisc_deref);
378 368
369static inline void tty_ldisc_put(struct tty_ldisc *ld)
370{
371 put_ldisc(ld);
372}
373
379/** 374/**
380 * tty_ldisc_enable - allow ldisc use 375 * tty_ldisc_enable - allow ldisc use
381 * @tty: terminal to activate ldisc on 376 * @tty: terminal to activate ldisc on
@@ -524,31 +519,6 @@ static int tty_ldisc_halt(struct tty_struct *tty)
524} 519}
525 520
526/** 521/**
527 * tty_ldisc_wait_idle - wait for the ldisc to become idle
528 * @tty: tty to wait for
529 *
530 * Wait for the line discipline to become idle. The discipline must
531 * have been halted for this to guarantee it remains idle.
532 *
533 * tty_ldisc_lock protects the ref counts currently.
534 */
535
536static int tty_ldisc_wait_idle(struct tty_struct *tty)
537{
538 unsigned long flags;
539 spin_lock_irqsave(&tty_ldisc_lock, flags);
540 while (tty->ldisc->refcount) {
541 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
542 if (wait_event_timeout(tty_ldisc_wait,
543 tty->ldisc->refcount == 0, 5 * HZ) == 0)
544 return -EBUSY;
545 spin_lock_irqsave(&tty_ldisc_lock, flags);
546 }
547 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
548 return 0;
549}
550
551/**
552 * tty_set_ldisc - set line discipline 522 * tty_set_ldisc - set line discipline
553 * @tty: the terminal to set 523 * @tty: the terminal to set
554 * @ldisc: the line discipline 524 * @ldisc: the line discipline
@@ -643,14 +613,6 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
643 613
644 flush_scheduled_work(); 614 flush_scheduled_work();
645 615
646 /* Let any existing reference holders finish */
647 retval = tty_ldisc_wait_idle(tty);
648 if (retval < 0) {
649 clear_bit(TTY_LDISC_CHANGING, &tty->flags);
650 tty_ldisc_put(new_ldisc);
651 return retval;
652 }
653
654 mutex_lock(&tty->ldisc_mutex); 616 mutex_lock(&tty->ldisc_mutex);
655 if (test_bit(TTY_HUPPED, &tty->flags)) { 617 if (test_bit(TTY_HUPPED, &tty->flags)) {
656 /* We were raced by the hangup method. It will have stomped 618 /* We were raced by the hangup method. It will have stomped
@@ -791,17 +753,19 @@ void tty_ldisc_hangup(struct tty_struct *tty)
791 * N_TTY. 753 * N_TTY.
792 */ 754 */
793 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { 755 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
794 /* Avoid racing set_ldisc */ 756 /* Avoid racing set_ldisc or tty_ldisc_release */
795 mutex_lock(&tty->ldisc_mutex); 757 mutex_lock(&tty->ldisc_mutex);
796 /* Switch back to N_TTY */ 758 if (tty->ldisc) { /* Not yet closed */
797 tty_ldisc_halt(tty); 759 /* Switch back to N_TTY */
798 tty_ldisc_wait_idle(tty); 760 tty_ldisc_halt(tty);
799 tty_ldisc_reinit(tty); 761 tty_ldisc_reinit(tty);
800 /* At this point we have a closed ldisc and we want to 762 /* At this point we have a closed ldisc and we want to
801 reopen it. We could defer this to the next open but 763 reopen it. We could defer this to the next open but
802 it means auditing a lot of other paths so this is a FIXME */ 764 it means auditing a lot of other paths so this is
803 WARN_ON(tty_ldisc_open(tty, tty->ldisc)); 765 a FIXME */
804 tty_ldisc_enable(tty); 766 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
767 tty_ldisc_enable(tty);
768 }
805 mutex_unlock(&tty->ldisc_mutex); 769 mutex_unlock(&tty->ldisc_mutex);
806 tty_reset_termios(tty); 770 tty_reset_termios(tty);
807 } 771 }
@@ -858,24 +822,25 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
858 tty_ldisc_halt(tty); 822 tty_ldisc_halt(tty);
859 flush_scheduled_work(); 823 flush_scheduled_work();
860 824
825 mutex_lock(&tty->ldisc_mutex);
861 /* 826 /*
862 * Wait for any short term users (we know they are just driver 827 * Now kill off the ldisc
863 * side waiters as the file is closing so user count on the file
864 * side is zero.
865 */ 828 */
829 tty_ldisc_close(tty, tty->ldisc);
830 tty_ldisc_put(tty->ldisc);
831 /* Force an oops if we mess this up */
832 tty->ldisc = NULL;
866 833
867 tty_ldisc_wait_idle(tty); 834 /* Ensure the next open requests the N_TTY ldisc */
868 835 tty_set_termios_ldisc(tty, N_TTY);
869 /* 836 mutex_unlock(&tty->ldisc_mutex);
870 * Shutdown the current line discipline, and reset it to N_TTY.
871 *
872 * FIXME: this MUST get fixed for the new reflocking
873 */
874 837
875 tty_ldisc_reinit(tty);
876 /* This will need doing differently if we need to lock */ 838 /* This will need doing differently if we need to lock */
877 if (o_tty) 839 if (o_tty)
878 tty_ldisc_release(o_tty, NULL); 840 tty_ldisc_release(o_tty, NULL);
841
842 /* And the memory resources remaining (buffers, termios) will be
843 disposed of when the kref hits zero */
879} 844}
880 845
881/** 846/**
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 4e862a75f7ff..9769b1149f76 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -267,7 +267,7 @@ int tty_port_block_til_ready(struct tty_port *port,
267 if (retval == 0) 267 if (retval == 0)
268 port->flags |= ASYNC_NORMAL_ACTIVE; 268 port->flags |= ASYNC_NORMAL_ACTIVE;
269 spin_unlock_irqrestore(&port->lock, flags); 269 spin_unlock_irqrestore(&port->lock, flags);
270 return 0; 270 return retval;
271 271
272} 272}
273EXPORT_SYMBOL(tty_port_block_til_ready); 273EXPORT_SYMBOL(tty_port_block_til_ready);
diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c
index d94d25c12aa8..c1791a63d99d 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/char/vc_screen.c
@@ -495,11 +495,15 @@ void vcs_remove_sysfs(int index)
495 495
496int __init vcs_init(void) 496int __init vcs_init(void)
497{ 497{
498 unsigned int i;
499
498 if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops)) 500 if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops))
499 panic("unable to get major %d for vcs device", VCS_MAJOR); 501 panic("unable to get major %d for vcs device", VCS_MAJOR);
500 vc_class = class_create(THIS_MODULE, "vc"); 502 vc_class = class_create(THIS_MODULE, "vc");
501 503
502 device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs"); 504 device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
503 device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa"); 505 device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
506 for (i = 0; i < MIN_NR_CONSOLES; i++)
507 vcs_make_sysfs(i);
504 return 0; 508 return 0;
505} 509}
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index d9113b4c76e3..404f4c1ee431 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -89,6 +89,7 @@
89#include <linux/mutex.h> 89#include <linux/mutex.h>
90#include <linux/vt_kern.h> 90#include <linux/vt_kern.h>
91#include <linux/selection.h> 91#include <linux/selection.h>
92#include <linux/smp_lock.h>
92#include <linux/tiocl.h> 93#include <linux/tiocl.h>
93#include <linux/kbd_kern.h> 94#include <linux/kbd_kern.h>
94#include <linux/consolemap.h> 95#include <linux/consolemap.h>
@@ -769,14 +770,12 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
769 visual_init(vc, currcons, 1); 770 visual_init(vc, currcons, 1);
770 if (!*vc->vc_uni_pagedir_loc) 771 if (!*vc->vc_uni_pagedir_loc)
771 con_set_default_unimap(vc); 772 con_set_default_unimap(vc);
772 if (!vc->vc_kmalloced) 773 vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
773 vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
774 if (!vc->vc_screenbuf) { 774 if (!vc->vc_screenbuf) {
775 kfree(vc); 775 kfree(vc);
776 vc_cons[currcons].d = NULL; 776 vc_cons[currcons].d = NULL;
777 return -ENOMEM; 777 return -ENOMEM;
778 } 778 }
779 vc->vc_kmalloced = 1;
780 vc_init(vc, vc->vc_rows, vc->vc_cols, 1); 779 vc_init(vc, vc->vc_rows, vc->vc_cols, 1);
781 vcs_make_sysfs(currcons); 780 vcs_make_sysfs(currcons);
782 atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param); 781 atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
@@ -912,10 +911,8 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
912 if (new_scr_end > new_origin) 911 if (new_scr_end > new_origin)
913 scr_memsetw((void *)new_origin, vc->vc_video_erase_char, 912 scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
914 new_scr_end - new_origin); 913 new_scr_end - new_origin);
915 if (vc->vc_kmalloced) 914 kfree(vc->vc_screenbuf);
916 kfree(vc->vc_screenbuf);
917 vc->vc_screenbuf = newscreen; 915 vc->vc_screenbuf = newscreen;
918 vc->vc_kmalloced = 1;
919 vc->vc_screenbuf_size = new_screen_size; 916 vc->vc_screenbuf_size = new_screen_size;
920 set_origin(vc); 917 set_origin(vc);
921 918
@@ -994,8 +991,7 @@ void vc_deallocate(unsigned int currcons)
994 vc->vc_sw->con_deinit(vc); 991 vc->vc_sw->con_deinit(vc);
995 put_pid(vc->vt_pid); 992 put_pid(vc->vt_pid);
996 module_put(vc->vc_sw->owner); 993 module_put(vc->vc_sw->owner);
997 if (vc->vc_kmalloced) 994 kfree(vc->vc_screenbuf);
998 kfree(vc->vc_screenbuf);
999 if (currcons >= MIN_NR_CONSOLES) 995 if (currcons >= MIN_NR_CONSOLES)
1000 kfree(vc); 996 kfree(vc);
1001 vc_cons[currcons].d = NULL; 997 vc_cons[currcons].d = NULL;
@@ -2880,7 +2876,6 @@ static int __init con_init(void)
2880 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); 2876 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
2881 visual_init(vc, currcons, 1); 2877 visual_init(vc, currcons, 1);
2882 vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); 2878 vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
2883 vc->vc_kmalloced = 0;
2884 vc_init(vc, vc->vc_rows, vc->vc_cols, 2879 vc_init(vc, vc->vc_rows, vc->vc_cols,
2885 currcons || !vc->vc_sw->con_save_screen); 2880 currcons || !vc->vc_sw->con_save_screen);
2886 } 2881 }
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 7539bed0f7e0..95189f288f8c 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -25,6 +25,7 @@
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/consolemap.h> 26#include <linux/consolemap.h>
27#include <linux/signal.h> 27#include <linux/signal.h>
28#include <linux/smp_lock.h>
28#include <linux/timex.h> 29#include <linux/timex.h>
29 30
30#include <asm/io.h> 31#include <asm/io.h>
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 9ffb05f4095d..93c2322feab7 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -161,7 +161,7 @@ static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
161 if (periodic) 161 if (periodic)
162 sh_tmu_write(p, TCOR, delta); 162 sh_tmu_write(p, TCOR, delta);
163 else 163 else
164 sh_tmu_write(p, TCOR, 0); 164 sh_tmu_write(p, TCOR, 0xffffffff);
165 165
166 sh_tmu_write(p, TCNT, delta); 166 sh_tmu_write(p, TCNT, delta);
167 167
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index c769ef269fb5..408c2af25d50 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cn_queue.c 2 * cn_queue.c
3 * 3 *
4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index fd336c5a9057..08b2500f21ec 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * connector.c 2 * connector.c
3 * 3 *
4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,7 @@
33#include <net/sock.h> 33#include <net/sock.h>
34 34
35MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 36MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
37MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); 37MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
38 38
39static u32 cn_idx = CN_IDX_CONNECTOR; 39static u32 cn_idx = CN_IDX_CONNECTOR;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e2ec0b18948..fd69086d08d5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -761,6 +761,10 @@ static struct kobj_type ktype_cpufreq = {
761 * cpufreq_add_dev - add a CPU device 761 * cpufreq_add_dev - add a CPU device
762 * 762 *
763 * Adds the cpufreq interface for a CPU device. 763 * Adds the cpufreq interface for a CPU device.
764 *
765 * The Oracle says: try running cpufreq registration/unregistration concurrently
 766 * with cpu hotplugging and all hell will break loose. Tried to clean this
767 * mess up, but more thorough testing is needed. - Mathieu
764 */ 768 */
765static int cpufreq_add_dev(struct sys_device *sys_dev) 769static int cpufreq_add_dev(struct sys_device *sys_dev)
766{ 770{
@@ -772,9 +776,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
772 struct sys_device *cpu_sys_dev; 776 struct sys_device *cpu_sys_dev;
773 unsigned long flags; 777 unsigned long flags;
774 unsigned int j; 778 unsigned int j;
775#ifdef CONFIG_SMP
776 struct cpufreq_policy *managed_policy;
777#endif
778 779
779 if (cpu_is_offline(cpu)) 780 if (cpu_is_offline(cpu))
780 return 0; 781 return 0;
@@ -804,15 +805,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
804 goto nomem_out; 805 goto nomem_out;
805 } 806 }
806 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) { 807 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
807 kfree(policy);
808 ret = -ENOMEM; 808 ret = -ENOMEM;
809 goto nomem_out; 809 goto err_free_policy;
810 } 810 }
811 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { 811 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
812 free_cpumask_var(policy->cpus);
813 kfree(policy);
814 ret = -ENOMEM; 812 ret = -ENOMEM;
815 goto nomem_out; 813 goto err_free_cpumask;
816 } 814 }
817 815
818 policy->cpu = cpu; 816 policy->cpu = cpu;
@@ -820,7 +818,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
820 818
821 /* Initially set CPU itself as the policy_cpu */ 819 /* Initially set CPU itself as the policy_cpu */
822 per_cpu(policy_cpu, cpu) = cpu; 820 per_cpu(policy_cpu, cpu) = cpu;
823 lock_policy_rwsem_write(cpu); 821 ret = (lock_policy_rwsem_write(cpu) < 0);
822 WARN_ON(ret);
824 823
825 init_completion(&policy->kobj_unregister); 824 init_completion(&policy->kobj_unregister);
826 INIT_WORK(&policy->update, handle_update); 825 INIT_WORK(&policy->update, handle_update);
@@ -833,7 +832,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
833 ret = cpufreq_driver->init(policy); 832 ret = cpufreq_driver->init(policy);
834 if (ret) { 833 if (ret) {
835 dprintk("initialization failed\n"); 834 dprintk("initialization failed\n");
836 goto err_out; 835 goto err_unlock_policy;
837 } 836 }
838 policy->user_policy.min = policy->min; 837 policy->user_policy.min = policy->min;
839 policy->user_policy.max = policy->max; 838 policy->user_policy.max = policy->max;
@@ -852,21 +851,31 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
852#endif 851#endif
853 852
854 for_each_cpu(j, policy->cpus) { 853 for_each_cpu(j, policy->cpus) {
854 struct cpufreq_policy *managed_policy;
855
855 if (cpu == j) 856 if (cpu == j)
856 continue; 857 continue;
857 858
858 /* Check for existing affected CPUs. 859 /* Check for existing affected CPUs.
859 * They may not be aware of it due to CPU Hotplug. 860 * They may not be aware of it due to CPU Hotplug.
861 * cpufreq_cpu_put is called when the device is removed
862 * in __cpufreq_remove_dev()
860 */ 863 */
861 managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */ 864 managed_policy = cpufreq_cpu_get(j);
862 if (unlikely(managed_policy)) { 865 if (unlikely(managed_policy)) {
863 866
864 /* Set proper policy_cpu */ 867 /* Set proper policy_cpu */
865 unlock_policy_rwsem_write(cpu); 868 unlock_policy_rwsem_write(cpu);
866 per_cpu(policy_cpu, cpu) = managed_policy->cpu; 869 per_cpu(policy_cpu, cpu) = managed_policy->cpu;
867 870
868 if (lock_policy_rwsem_write(cpu) < 0) 871 if (lock_policy_rwsem_write(cpu) < 0) {
869 goto err_out_driver_exit; 872 /* Should not go through policy unlock path */
873 if (cpufreq_driver->exit)
874 cpufreq_driver->exit(policy);
875 ret = -EBUSY;
876 cpufreq_cpu_put(managed_policy);
877 goto err_free_cpumask;
878 }
870 879
871 spin_lock_irqsave(&cpufreq_driver_lock, flags); 880 spin_lock_irqsave(&cpufreq_driver_lock, flags);
872 cpumask_copy(managed_policy->cpus, policy->cpus); 881 cpumask_copy(managed_policy->cpus, policy->cpus);
@@ -878,11 +887,13 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
878 &managed_policy->kobj, 887 &managed_policy->kobj,
879 "cpufreq"); 888 "cpufreq");
880 if (ret) 889 if (ret)
881 goto err_out_driver_exit; 890 cpufreq_cpu_put(managed_policy);
882 891 /*
883 cpufreq_debug_enable_ratelimit(); 892 * Success. We only needed to be added to the mask.
884 ret = 0; 893 * Call driver->exit() because only the cpu parent of
885 goto err_out_driver_exit; /* call driver->exit() */ 894 * the kobj needed to call init().
895 */
896 goto out_driver_exit; /* call driver->exit() */
886 } 897 }
887 } 898 }
888#endif 899#endif
@@ -892,29 +903,31 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
892 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj, 903 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
893 "cpufreq"); 904 "cpufreq");
894 if (ret) 905 if (ret)
895 goto err_out_driver_exit; 906 goto out_driver_exit;
896 907
897 /* set up files for this cpu device */ 908 /* set up files for this cpu device */
898 drv_attr = cpufreq_driver->attr; 909 drv_attr = cpufreq_driver->attr;
899 while ((drv_attr) && (*drv_attr)) { 910 while ((drv_attr) && (*drv_attr)) {
900 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 911 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
901 if (ret) 912 if (ret)
902 goto err_out_driver_exit; 913 goto err_out_kobj_put;
903 drv_attr++; 914 drv_attr++;
904 } 915 }
905 if (cpufreq_driver->get) { 916 if (cpufreq_driver->get) {
906 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 917 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
907 if (ret) 918 if (ret)
908 goto err_out_driver_exit; 919 goto err_out_kobj_put;
909 } 920 }
910 if (cpufreq_driver->target) { 921 if (cpufreq_driver->target) {
911 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 922 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
912 if (ret) 923 if (ret)
913 goto err_out_driver_exit; 924 goto err_out_kobj_put;
914 } 925 }
915 926
916 spin_lock_irqsave(&cpufreq_driver_lock, flags); 927 spin_lock_irqsave(&cpufreq_driver_lock, flags);
917 for_each_cpu(j, policy->cpus) { 928 for_each_cpu(j, policy->cpus) {
929 if (!cpu_online(j))
930 continue;
918 per_cpu(cpufreq_cpu_data, j) = policy; 931 per_cpu(cpufreq_cpu_data, j) = policy;
919 per_cpu(policy_cpu, j) = policy->cpu; 932 per_cpu(policy_cpu, j) = policy->cpu;
920 } 933 }
@@ -922,18 +935,22 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
922 935
923 /* symlink affected CPUs */ 936 /* symlink affected CPUs */
924 for_each_cpu(j, policy->cpus) { 937 for_each_cpu(j, policy->cpus) {
938 struct cpufreq_policy *managed_policy;
939
925 if (j == cpu) 940 if (j == cpu)
926 continue; 941 continue;
927 if (!cpu_online(j)) 942 if (!cpu_online(j))
928 continue; 943 continue;
929 944
930 dprintk("CPU %u already managed, adding link\n", j); 945 dprintk("CPU %u already managed, adding link\n", j);
931 cpufreq_cpu_get(cpu); 946 managed_policy = cpufreq_cpu_get(cpu);
932 cpu_sys_dev = get_cpu_sysdev(j); 947 cpu_sys_dev = get_cpu_sysdev(j);
933 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, 948 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
934 "cpufreq"); 949 "cpufreq");
935 if (ret) 950 if (ret) {
951 cpufreq_cpu_put(managed_policy);
936 goto err_out_unregister; 952 goto err_out_unregister;
953 }
937 } 954 }
938 955
939 policy->governor = NULL; /* to assure that the starting sequence is 956 policy->governor = NULL; /* to assure that the starting sequence is
@@ -965,17 +982,20 @@ err_out_unregister:
965 per_cpu(cpufreq_cpu_data, j) = NULL; 982 per_cpu(cpufreq_cpu_data, j) = NULL;
966 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 983 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
967 984
985err_out_kobj_put:
968 kobject_put(&policy->kobj); 986 kobject_put(&policy->kobj);
969 wait_for_completion(&policy->kobj_unregister); 987 wait_for_completion(&policy->kobj_unregister);
970 988
971err_out_driver_exit: 989out_driver_exit:
972 if (cpufreq_driver->exit) 990 if (cpufreq_driver->exit)
973 cpufreq_driver->exit(policy); 991 cpufreq_driver->exit(policy);
974 992
975err_out: 993err_unlock_policy:
976 unlock_policy_rwsem_write(cpu); 994 unlock_policy_rwsem_write(cpu);
995err_free_cpumask:
996 free_cpumask_var(policy->cpus);
997err_free_policy:
977 kfree(policy); 998 kfree(policy);
978
979nomem_out: 999nomem_out:
980 module_put(cpufreq_driver->owner); 1000 module_put(cpufreq_driver->owner);
981module_out: 1001module_out:
@@ -1070,8 +1090,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1070 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1090 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1071#endif 1091#endif
1072 1092
1073 unlock_policy_rwsem_write(cpu);
1074
1075 if (cpufreq_driver->target) 1093 if (cpufreq_driver->target)
1076 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1094 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1077 1095
@@ -1088,6 +1106,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1088 if (cpufreq_driver->exit) 1106 if (cpufreq_driver->exit)
1089 cpufreq_driver->exit(data); 1107 cpufreq_driver->exit(data);
1090 1108
1109 unlock_policy_rwsem_write(cpu);
1110
1091 free_cpumask_var(data->related_cpus); 1111 free_cpumask_var(data->related_cpus);
1092 free_cpumask_var(data->cpus); 1112 free_cpumask_var(data->cpus);
1093 kfree(data); 1113 kfree(data);
@@ -1228,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get);
1228 1248
1229static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) 1249static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1230{ 1250{
1231 int cpu = sysdev->id;
1232 int ret = 0; 1251 int ret = 0;
1252
1253#ifdef __powerpc__
1254 int cpu = sysdev->id;
1233 unsigned int cur_freq = 0; 1255 unsigned int cur_freq = 0;
1234 struct cpufreq_policy *cpu_policy; 1256 struct cpufreq_policy *cpu_policy;
1235 1257
1236 dprintk("suspending cpu %u\n", cpu); 1258 dprintk("suspending cpu %u\n", cpu);
1237 1259
1260 /*
1261 * This whole bogosity is here because Powerbooks are made of fail.
1262 * No sane platform should need any of the code below to be run.
1263 * (it's entirely the wrong thing to do, as driver->get may
1264 * reenable interrupts on some architectures).
1265 */
1266
1238 if (!cpu_online(cpu)) 1267 if (!cpu_online(cpu))
1239 return 0; 1268 return 0;
1240 1269
@@ -1293,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1293 1322
1294out: 1323out:
1295 cpufreq_cpu_put(cpu_policy); 1324 cpufreq_cpu_put(cpu_policy);
1325#endif /* __powerpc__ */
1296 return ret; 1326 return ret;
1297} 1327}
1298 1328
@@ -1306,12 +1336,18 @@ out:
1306 */ 1336 */
1307static int cpufreq_resume(struct sys_device *sysdev) 1337static int cpufreq_resume(struct sys_device *sysdev)
1308{ 1338{
1309 int cpu = sysdev->id;
1310 int ret = 0; 1339 int ret = 0;
1340
1341#ifdef __powerpc__
1342 int cpu = sysdev->id;
1311 struct cpufreq_policy *cpu_policy; 1343 struct cpufreq_policy *cpu_policy;
1312 1344
1313 dprintk("resuming cpu %u\n", cpu); 1345 dprintk("resuming cpu %u\n", cpu);
1314 1346
1347 /* As with the ->suspend method, all the code below is
1348 * only necessary because Powerbooks suck.
1349 * See commit 42d4dc3f4e1e for jokes. */
1350
1315 if (!cpu_online(cpu)) 1351 if (!cpu_online(cpu))
1316 return 0; 1352 return 0;
1317 1353
@@ -1375,6 +1411,7 @@ out:
1375 schedule_work(&cpu_policy->update); 1411 schedule_work(&cpu_policy->update);
1376fail: 1412fail:
1377 cpufreq_cpu_put(cpu_policy); 1413 cpufreq_cpu_put(cpu_policy);
1414#endif /* __powerpc__ */
1378 return ret; 1415 return ret;
1379} 1416}
1380 1417
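
The cpufreq_add_dev() rework above replaces ad-hoc cleanup at each failure site with a label-per-resource unwind chain (err_unlock_policy, err_free_cpumask, err_free_policy, nomem_out), so every error path jumps to the label that releases exactly what has been acquired so far. A minimal, self-contained C sketch of that pattern follows; the types and allocation sizes are illustrative only, not the kernel code.

/* Userspace sketch of the label-per-resource unwind pattern adopted by
 * cpufreq_add_dev() above; names and sizes are illustrative. */
#include <stdlib.h>

struct policy {
	int *cpus;
	int *related_cpus;
};

static int add_policy(struct policy **out)
{
	struct policy *p = malloc(sizeof(*p));

	if (!p)
		goto nomem_out;
	p->cpus = calloc(16, sizeof(int));
	if (!p->cpus)
		goto err_free_policy;
	p->related_cpus = calloc(16, sizeof(int));
	if (!p->related_cpus)
		goto err_free_cpumask;

	*out = p;
	return 0;

	/* Each label frees only what was allocated before the jump. */
err_free_cpumask:
	free(p->cpus);
err_free_policy:
	free(p);
nomem_out:
	return -1;
}

int main(void)
{
	struct policy *p;

	return add_policy(&p) ? EXIT_FAILURE : EXIT_SUCCESS;
}
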
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..bdea7e2f94ba 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -64,21 +64,20 @@ struct cpu_dbs_info_s {
64 unsigned int requested_freq; 64 unsigned int requested_freq;
65 int cpu; 65 int cpu;
66 unsigned int enable:1; 66 unsigned int enable:1;
67 /*
68 * percpu mutex that serializes governor limit change with
69 * do_dbs_timer invocation. We do not want do_dbs_timer to run
70 * when user is changing the governor or limits.
71 */
72 struct mutex timer_mutex;
67}; 73};
68static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 74static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
69 75
70static unsigned int dbs_enable; /* number of CPUs using this policy */ 76static unsigned int dbs_enable; /* number of CPUs using this policy */
71 77
72/* 78/*
73 * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug 79 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
74 * lock and dbs_mutex. cpu_hotplug lock should always be held before 80 * different CPUs. It protects dbs_enable in governor start/stop.
75 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
76 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
77 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
78 * is recursive for the same process. -Venki
79 * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
80 * would deadlock with cancel_delayed_work_sync(), which is needed for proper
81 * raceless workqueue teardown.
82 */ 81 */
83static DEFINE_MUTEX(dbs_mutex); 82static DEFINE_MUTEX(dbs_mutex);
84 83
@@ -488,18 +487,12 @@ static void do_dbs_timer(struct work_struct *work)
488 487
489 delay -= jiffies % delay; 488 delay -= jiffies % delay;
490 489
491 if (lock_policy_rwsem_write(cpu) < 0) 490 mutex_lock(&dbs_info->timer_mutex);
492 return;
493
494 if (!dbs_info->enable) {
495 unlock_policy_rwsem_write(cpu);
496 return;
497 }
498 491
499 dbs_check_cpu(dbs_info); 492 dbs_check_cpu(dbs_info);
500 493
501 queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); 494 queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
502 unlock_policy_rwsem_write(cpu); 495 mutex_unlock(&dbs_info->timer_mutex);
503} 496}
504 497
505static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) 498static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -535,9 +528,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
535 if ((!cpu_online(cpu)) || (!policy->cur)) 528 if ((!cpu_online(cpu)) || (!policy->cur))
536 return -EINVAL; 529 return -EINVAL;
537 530
538 if (this_dbs_info->enable) /* Already enabled */
539 break;
540
541 mutex_lock(&dbs_mutex); 531 mutex_lock(&dbs_mutex);
542 532
543 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); 533 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -561,6 +551,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
561 this_dbs_info->down_skip = 0; 551 this_dbs_info->down_skip = 0;
562 this_dbs_info->requested_freq = policy->cur; 552 this_dbs_info->requested_freq = policy->cur;
563 553
554 mutex_init(&this_dbs_info->timer_mutex);
564 dbs_enable++; 555 dbs_enable++;
565 /* 556 /*
566 * Start the timerschedule work, when this governor 557 * Start the timerschedule work, when this governor
@@ -590,17 +581,19 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
590 &dbs_cpufreq_notifier_block, 581 &dbs_cpufreq_notifier_block,
591 CPUFREQ_TRANSITION_NOTIFIER); 582 CPUFREQ_TRANSITION_NOTIFIER);
592 } 583 }
593 dbs_timer_init(this_dbs_info);
594
595 mutex_unlock(&dbs_mutex); 584 mutex_unlock(&dbs_mutex);
596 585
586 dbs_timer_init(this_dbs_info);
587
597 break; 588 break;
598 589
599 case CPUFREQ_GOV_STOP: 590 case CPUFREQ_GOV_STOP:
600 mutex_lock(&dbs_mutex);
601 dbs_timer_exit(this_dbs_info); 591 dbs_timer_exit(this_dbs_info);
592
593 mutex_lock(&dbs_mutex);
602 sysfs_remove_group(&policy->kobj, &dbs_attr_group); 594 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
603 dbs_enable--; 595 dbs_enable--;
596 mutex_destroy(&this_dbs_info->timer_mutex);
604 597
605 /* 598 /*
606 * Stop the timerschedule work, when this governor 599 * Stop the timerschedule work, when this governor
@@ -616,7 +609,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
616 break; 609 break;
617 610
618 case CPUFREQ_GOV_LIMITS: 611 case CPUFREQ_GOV_LIMITS:
619 mutex_lock(&dbs_mutex); 612 mutex_lock(&this_dbs_info->timer_mutex);
620 if (policy->max < this_dbs_info->cur_policy->cur) 613 if (policy->max < this_dbs_info->cur_policy->cur)
621 __cpufreq_driver_target( 614 __cpufreq_driver_target(
622 this_dbs_info->cur_policy, 615 this_dbs_info->cur_policy,
@@ -625,7 +618,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
625 __cpufreq_driver_target( 618 __cpufreq_driver_target(
626 this_dbs_info->cur_policy, 619 this_dbs_info->cur_policy,
627 policy->min, CPUFREQ_RELATION_L); 620 policy->min, CPUFREQ_RELATION_L);
628 mutex_unlock(&dbs_mutex); 621 mutex_unlock(&this_dbs_info->timer_mutex);
629 622
630 break; 623 break;
631 } 624 }
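
The conservative governor hunks above split the locking: do_dbs_timer() and CPUFREQ_GOV_LIMITS now serialize on a per-CPU timer_mutex, and dbs_timer_exit() (which does cancel_delayed_work_sync()) runs before dbs_mutex is taken, so teardown never waits on the worker while holding a lock the worker needs. Below is a small userspace analogue of that split, using pthreads; dbs_info, do_dbs_timer, gov_limits and gov_stop are simplified stand-ins, not the kernel symbols.

/* Userspace analogue of the per-CPU timer_mutex split; compile with -pthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER; /* global tunables */

struct dbs_info {
	pthread_mutex_t timer_mutex; /* serializes timer vs. limit changes */
	pthread_t worker;
	atomic_bool stop;
	int cur_freq;
};

static void *do_dbs_timer(void *arg)
{
	struct dbs_info *info = arg;

	while (!atomic_load(&info->stop)) {
		pthread_mutex_lock(&info->timer_mutex);
		info->cur_freq++;                 /* stand-in for dbs_check_cpu() */
		pthread_mutex_unlock(&info->timer_mutex);
		usleep(1000);
	}
	return NULL;
}

static void gov_limits(struct dbs_info *info, int max)
{
	pthread_mutex_lock(&info->timer_mutex);   /* per-CPU lock, not dbs_mutex */
	if (info->cur_freq > max)
		info->cur_freq = max;
	pthread_mutex_unlock(&info->timer_mutex);
}

static void gov_stop(struct dbs_info *info)
{
	atomic_store(&info->stop, true);
	pthread_join(info->worker, NULL);         /* like cancel_delayed_work_sync() */
	pthread_mutex_lock(&dbs_mutex);           /* global state touched only afterwards */
	pthread_mutex_unlock(&dbs_mutex);
	pthread_mutex_destroy(&info->timer_mutex);
}

int main(void)
{
	struct dbs_info info = { .cur_freq = 1000 };

	pthread_mutex_init(&info.timer_mutex, NULL);
	pthread_create(&info.worker, NULL, do_dbs_timer, &info);
	gov_limits(&info, 800);
	gov_stop(&info);
	printf("final freq %d\n", info.cur_freq);
	return 0;
}
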
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..d6ba14276bb1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -70,23 +70,21 @@ struct cpu_dbs_info_s {
70 unsigned int freq_lo_jiffies; 70 unsigned int freq_lo_jiffies;
71 unsigned int freq_hi_jiffies; 71 unsigned int freq_hi_jiffies;
72 int cpu; 72 int cpu;
73 unsigned int enable:1, 73 unsigned int sample_type:1;
74 sample_type:1; 74 /*
75 * percpu mutex that serializes governor limit change with
76 * do_dbs_timer invocation. We do not want do_dbs_timer to run
77 * when user is changing the governor or limits.
78 */
79 struct mutex timer_mutex;
75}; 80};
76static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 81static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
77 82
78static unsigned int dbs_enable; /* number of CPUs using this policy */ 83static unsigned int dbs_enable; /* number of CPUs using this policy */
79 84
80/* 85/*
81 * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug 86 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
82 * lock and dbs_mutex. cpu_hotplug lock should always be held before 87 * different CPUs. It protects dbs_enable in governor start/stop.
83 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
84 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
85 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
86 * is recursive for the same process. -Venki
87 * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
88 * would deadlock with cancel_delayed_work_sync(), which is needed for proper
89 * raceless workqueue teardown.
90 */ 88 */
91static DEFINE_MUTEX(dbs_mutex); 89static DEFINE_MUTEX(dbs_mutex);
92 90
@@ -192,13 +190,18 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
192 return freq_hi; 190 return freq_hi;
193} 191}
194 192
193static void ondemand_powersave_bias_init_cpu(int cpu)
194{
195 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
196 dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
197 dbs_info->freq_lo = 0;
198}
199
195static void ondemand_powersave_bias_init(void) 200static void ondemand_powersave_bias_init(void)
196{ 201{
197 int i; 202 int i;
198 for_each_online_cpu(i) { 203 for_each_online_cpu(i) {
199 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i); 204 ondemand_powersave_bias_init_cpu(i);
200 dbs_info->freq_table = cpufreq_frequency_get_table(i);
201 dbs_info->freq_lo = 0;
202 } 205 }
203} 206}
204 207
@@ -240,12 +243,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
240 unsigned int input; 243 unsigned int input;
241 int ret; 244 int ret;
242 ret = sscanf(buf, "%u", &input); 245 ret = sscanf(buf, "%u", &input);
246 if (ret != 1)
247 return -EINVAL;
243 248
244 mutex_lock(&dbs_mutex); 249 mutex_lock(&dbs_mutex);
245 if (ret != 1) {
246 mutex_unlock(&dbs_mutex);
247 return -EINVAL;
248 }
249 dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); 250 dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
250 mutex_unlock(&dbs_mutex); 251 mutex_unlock(&dbs_mutex);
251 252
@@ -259,13 +260,12 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
259 int ret; 260 int ret;
260 ret = sscanf(buf, "%u", &input); 261 ret = sscanf(buf, "%u", &input);
261 262
262 mutex_lock(&dbs_mutex);
263 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || 263 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
264 input < MIN_FREQUENCY_UP_THRESHOLD) { 264 input < MIN_FREQUENCY_UP_THRESHOLD) {
265 mutex_unlock(&dbs_mutex);
266 return -EINVAL; 265 return -EINVAL;
267 } 266 }
268 267
268 mutex_lock(&dbs_mutex);
269 dbs_tuners_ins.up_threshold = input; 269 dbs_tuners_ins.up_threshold = input;
270 mutex_unlock(&dbs_mutex); 270 mutex_unlock(&dbs_mutex);
271 271
@@ -363,9 +363,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
363 struct cpufreq_policy *policy; 363 struct cpufreq_policy *policy;
364 unsigned int j; 364 unsigned int j;
365 365
366 if (!this_dbs_info->enable)
367 return;
368
369 this_dbs_info->freq_lo = 0; 366 this_dbs_info->freq_lo = 0;
370 policy = this_dbs_info->cur_policy; 367 policy = this_dbs_info->cur_policy;
371 368
@@ -493,14 +490,7 @@ static void do_dbs_timer(struct work_struct *work)
493 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 490 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
494 491
495 delay -= jiffies % delay; 492 delay -= jiffies % delay;
496 493 mutex_lock(&dbs_info->timer_mutex);
497 if (lock_policy_rwsem_write(cpu) < 0)
498 return;
499
500 if (!dbs_info->enable) {
501 unlock_policy_rwsem_write(cpu);
502 return;
503 }
504 494
505 /* Common NORMAL_SAMPLE setup */ 495 /* Common NORMAL_SAMPLE setup */
506 dbs_info->sample_type = DBS_NORMAL_SAMPLE; 496 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
@@ -517,7 +507,7 @@ static void do_dbs_timer(struct work_struct *work)
517 dbs_info->freq_lo, CPUFREQ_RELATION_H); 507 dbs_info->freq_lo, CPUFREQ_RELATION_H);
518 } 508 }
519 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 509 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
520 unlock_policy_rwsem_write(cpu); 510 mutex_unlock(&dbs_info->timer_mutex);
521} 511}
522 512
523static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) 513static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -526,8 +516,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
526 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 516 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
527 delay -= jiffies % delay; 517 delay -= jiffies % delay;
528 518
529 dbs_info->enable = 1;
530 ondemand_powersave_bias_init();
531 dbs_info->sample_type = DBS_NORMAL_SAMPLE; 519 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
532 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); 520 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
533 queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, 521 queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
@@ -536,7 +524,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
536 524
537static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 525static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
538{ 526{
539 dbs_info->enable = 0;
540 cancel_delayed_work_sync(&dbs_info->work); 527 cancel_delayed_work_sync(&dbs_info->work);
541} 528}
542 529
@@ -555,19 +542,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
555 if ((!cpu_online(cpu)) || (!policy->cur)) 542 if ((!cpu_online(cpu)) || (!policy->cur))
556 return -EINVAL; 543 return -EINVAL;
557 544
558 if (this_dbs_info->enable) /* Already enabled */
559 break;
560
561 mutex_lock(&dbs_mutex); 545 mutex_lock(&dbs_mutex);
562 dbs_enable++;
563 546
564 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); 547 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
565 if (rc) { 548 if (rc) {
566 dbs_enable--;
567 mutex_unlock(&dbs_mutex); 549 mutex_unlock(&dbs_mutex);
568 return rc; 550 return rc;
569 } 551 }
570 552
553 dbs_enable++;
571 for_each_cpu(j, policy->cpus) { 554 for_each_cpu(j, policy->cpus) {
572 struct cpu_dbs_info_s *j_dbs_info; 555 struct cpu_dbs_info_s *j_dbs_info;
573 j_dbs_info = &per_cpu(cpu_dbs_info, j); 556 j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -581,6 +564,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
581 } 564 }
582 } 565 }
583 this_dbs_info->cpu = cpu; 566 this_dbs_info->cpu = cpu;
567 ondemand_powersave_bias_init_cpu(cpu);
568 mutex_init(&this_dbs_info->timer_mutex);
584 /* 569 /*
585 * Start the timerschedule work, when this governor 570 * Start the timerschedule work, when this governor
586 * is used for first time 571 * is used for first time
@@ -598,29 +583,31 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
598 max(min_sampling_rate, 583 max(min_sampling_rate,
599 latency * LATENCY_MULTIPLIER); 584 latency * LATENCY_MULTIPLIER);
600 } 585 }
601 dbs_timer_init(this_dbs_info);
602
603 mutex_unlock(&dbs_mutex); 586 mutex_unlock(&dbs_mutex);
587
588 dbs_timer_init(this_dbs_info);
604 break; 589 break;
605 590
606 case CPUFREQ_GOV_STOP: 591 case CPUFREQ_GOV_STOP:
607 mutex_lock(&dbs_mutex);
608 dbs_timer_exit(this_dbs_info); 592 dbs_timer_exit(this_dbs_info);
593
594 mutex_lock(&dbs_mutex);
609 sysfs_remove_group(&policy->kobj, &dbs_attr_group); 595 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
596 mutex_destroy(&this_dbs_info->timer_mutex);
610 dbs_enable--; 597 dbs_enable--;
611 mutex_unlock(&dbs_mutex); 598 mutex_unlock(&dbs_mutex);
612 599
613 break; 600 break;
614 601
615 case CPUFREQ_GOV_LIMITS: 602 case CPUFREQ_GOV_LIMITS:
616 mutex_lock(&dbs_mutex); 603 mutex_lock(&this_dbs_info->timer_mutex);
617 if (policy->max < this_dbs_info->cur_policy->cur) 604 if (policy->max < this_dbs_info->cur_policy->cur)
618 __cpufreq_driver_target(this_dbs_info->cur_policy, 605 __cpufreq_driver_target(this_dbs_info->cur_policy,
619 policy->max, CPUFREQ_RELATION_H); 606 policy->max, CPUFREQ_RELATION_H);
620 else if (policy->min > this_dbs_info->cur_policy->cur) 607 else if (policy->min > this_dbs_info->cur_policy->cur)
621 __cpufreq_driver_target(this_dbs_info->cur_policy, 608 __cpufreq_driver_target(this_dbs_info->cur_policy,
622 policy->min, CPUFREQ_RELATION_L); 609 policy->min, CPUFREQ_RELATION_L);
623 mutex_unlock(&dbs_mutex); 610 mutex_unlock(&this_dbs_info->timer_mutex);
624 break; 611 break;
625 } 612 }
626 return 0; 613 return 0;
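
A secondary change in the ondemand hunks above is that sysfs input is validated before dbs_mutex is taken (store_sampling_rate, store_up_threshold), so the invalid-input path never has to drop a lock it did not need. A sketch of that ordering, with a pthread mutex standing in for the kernel mutex; the names mirror the driver but the code is illustrative only.

/* Validate first, lock second; compile with -pthread. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int sampling_rate = 10000;
static const unsigned int min_sampling_rate = 5000;

static int store_sampling_rate(const char *buf)
{
	unsigned int input;

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;          /* rejected before any lock is held */

	pthread_mutex_lock(&dbs_mutex);
	sampling_rate = input > min_sampling_rate ? input : min_sampling_rate;
	pthread_mutex_unlock(&dbs_mutex);
	return 0;
}

int main(void)
{
	int ret;

	printf("bad input  -> %d\n", store_sampling_rate("oops"));
	ret = store_sampling_rate("20000");
	printf("good input -> %d, rate=%u\n", ret, sampling_rate);
	return 0;
}
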
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c36bf40568cf..e2a10bcba7a1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -754,13 +754,13 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
754static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) 754static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
755{ 755{
756 int bit; 756 int bit;
757 enum dev_type edac_cap = EDAC_NONE; 757 enum dev_type edac_cap = EDAC_FLAG_NONE;
758 758
759 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F) 759 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
760 ? 19 760 ? 19
761 : 17; 761 : 17;
762 762
763 if (pvt->dclr0 >> BIT(bit)) 763 if (pvt->dclr0 & BIT(bit))
764 edac_cap = EDAC_FLAG_SECDED; 764 edac_cap = EDAC_FLAG_SECDED;
765 765
766 return edac_cap; 766 return edac_cap;
@@ -868,6 +868,8 @@ static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
868 goto err_reg; 868 goto err_reg;
869 } 869 }
870 870
871 return;
872
871err_reg: 873err_reg:
872 debugf0("Error reading F2x%03x.\n", reg); 874 debugf0("Error reading F2x%03x.\n", reg);
873} 875}
@@ -970,7 +972,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
970 } 972 }
971 973
972 for (cs = 0; cs < pvt->num_dcsm; cs++) { 974 for (cs = 0; cs < pvt->num_dcsm; cs++) {
973 reg = K8_DCSB0 + (cs * 4); 975 reg = K8_DCSM0 + (cs * 4);
974 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, 976 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
975 &pvt->dcsm0[cs]); 977 &pvt->dcsm0[cs]);
976 if (unlikely(err)) 978 if (unlikely(err))
@@ -1269,7 +1271,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
1269 if (channels == 0) 1271 if (channels == 0)
1270 channels = 1; 1272 channels = 1;
1271 1273
1272 debugf0("DIMM count= %d\n", channels); 1274 debugf0("MCT channel count: %d\n", channels);
1273 1275
1274 return channels; 1276 return channels;
1275 1277
@@ -2634,6 +2636,8 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2634 2636
2635 amd64_dump_misc_regs(pvt); 2637 amd64_dump_misc_regs(pvt);
2636 2638
2639 return;
2640
2637err_reg: 2641err_reg:
2638 debugf0("Reading an MC register failed\n"); 2642 debugf0("Reading an MC register failed\n");
2639 2643
@@ -2966,12 +2970,20 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2966 " Use of the override can cause " 2970 " Use of the override can cause "
2967 "unknown side effects.\n"); 2971 "unknown side effects.\n");
2968 ret = -ENODEV; 2972 ret = -ENODEV;
2969 } 2973 } else
2974 /*
2975 * enable further driver loading if ECC enable is
2976 * overridden.
2977 */
2978 ret = 0;
2970 } else { 2979 } else {
2971 amd64_printk(KERN_INFO, 2980 amd64_printk(KERN_INFO,
2972 "ECC is enabled by BIOS, Proceeding " 2981 "ECC is enabled by BIOS, Proceeding "
2973 "with EDAC module initialization\n"); 2982 "with EDAC module initialization\n");
2974 2983
2984 /* Signal good ECC status */
2985 ret = 0;
2986
2975 /* CLEAR the override, since BIOS controlled it */ 2987 /* CLEAR the override, since BIOS controlled it */
2976 ecc_enable_override = 0; 2988 ecc_enable_override = 0;
2977 } 2989 }
@@ -3006,7 +3018,6 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
3006 3018
3007 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; 3019 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3008 mci->edac_ctl_cap = EDAC_FLAG_NONE; 3020 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3009 mci->edac_cap = EDAC_FLAG_NONE;
3010 3021
3011 if (pvt->nbcap & K8_NBCAP_SECDED) 3022 if (pvt->nbcap & K8_NBCAP_SECDED)
3012 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; 3023 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
@@ -3052,7 +3063,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
3052 if (!pvt) 3063 if (!pvt)
3053 goto err_exit; 3064 goto err_exit;
3054 3065
3055 pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl); 3066 pvt->mc_node_id = get_node_id(dram_f2_ctl);
3056 3067
3057 pvt->dram_f2_ctl = dram_f2_ctl; 3068 pvt->dram_f2_ctl = dram_f2_ctl;
3058 pvt->ext_model = boot_cpu_data.x86_model >> 4; 3069 pvt->ext_model = boot_cpu_data.x86_model >> 4;
@@ -3179,8 +3190,7 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
3179{ 3190{
3180 int ret = 0; 3191 int ret = 0;
3181 3192
3182 debugf0("(MC node=%d,mc_type='%s')\n", 3193 debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
3183 get_mc_node_id_from_pdev(pdev),
3184 get_amd_family_name(mc_type->driver_data)); 3194 get_amd_family_name(mc_type->driver_data));
3185 3195
3186 ret = pci_enable_device(pdev); 3196 ret = pci_enable_device(pdev);
@@ -3319,15 +3329,17 @@ static int __init amd64_edac_init(void)
3319 3329
3320 err = amd64_init_2nd_stage(pvt_lookup[nb]); 3330 err = amd64_init_2nd_stage(pvt_lookup[nb]);
3321 if (err) 3331 if (err)
3322 goto err_exit; 3332 goto err_2nd_stage;
3323 } 3333 }
3324 3334
3325 amd64_setup_pci_device(); 3335 amd64_setup_pci_device();
3326 3336
3327 return 0; 3337 return 0;
3328 3338
3339err_2nd_stage:
3340 debugf0("2nd stage failed\n");
3341
3329err_exit: 3342err_exit:
3330 debugf0("'finish_setup' stage failed\n");
3331 pci_unregister_driver(&amd64_pci_driver); 3343 pci_unregister_driver(&amd64_pci_driver);
3332 3344
3333 return err; 3345 return err;
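
One fix in the amd64_edac hunk above is the dclr0 test: BIT(bit) expands to a mask, not a shift count, so "pvt->dclr0 >> BIT(bit)" was not a bit test at all, whereas "pvt->dclr0 & BIT(bit)" is. A tiny standalone illustration; the register value and bit number are made up for the example.

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	uint32_t dclr0 = BIT(19);     /* pretend the ECC capability bit is set */

	/* BIT(19) is the mask 0x80000, not a shift count; "dclr0 >> BIT(19)"
	 * would shift by 524288 positions, which is undefined behaviour and
	 * certainly not a bit test.  The bitwise AND is the correct check. */
	if (dclr0 & BIT(19))
		printf("SECDED capable\n");
	else
		printf("no ECC capability\n");
	return 0;
}
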
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index a159957e167b..ba73015af8e4 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -444,7 +444,7 @@ enum {
444#define K8_MSR_MC4ADDR 0x0412 444#define K8_MSR_MC4ADDR 0x0412
445 445
446/* AMD sets the first MC device at device ID 0x18. */ 446/* AMD sets the first MC device at device ID 0x18. */
447static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev) 447static inline int get_node_id(struct pci_dev *pdev)
448{ 448{
449 return PCI_SLOT(pdev->devfn) - 0x18; 449 return PCI_SLOT(pdev->devfn) - 0x18;
450} 450}
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3493c6bdb820..871c13b4c148 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -150,6 +150,8 @@ enum mem_type {
150 MEM_FB_DDR2, /* fully buffered DDR2 */ 150 MEM_FB_DDR2, /* fully buffered DDR2 */
151 MEM_RDDR2, /* Registered DDR2 RAM */ 151 MEM_RDDR2, /* Registered DDR2 RAM */
152 MEM_XDR, /* Rambus XDR */ 152 MEM_XDR, /* Rambus XDR */
153 MEM_DDR3, /* DDR3 RAM */
154 MEM_RDDR3, /* Registered DDR3 RAM */
153}; 155};
154 156
155#define MEM_FLAG_EMPTY BIT(MEM_EMPTY) 157#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -167,6 +169,8 @@ enum mem_type {
167#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) 169#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
168#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) 170#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
169#define MEM_FLAG_XDR BIT(MEM_XDR) 171#define MEM_FLAG_XDR BIT(MEM_XDR)
172#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
173#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
170 174
171/* chipset Error Detection and Correction capabilities and mode */ 175/* chipset Error Detection and Correction capabilities and mode */
172enum edac_type { 176enum edac_type {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ad218fe4942d..e1d4ce083481 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -94,7 +94,9 @@ static const char *mem_types[] = {
94 [MEM_DDR2] = "Unbuffered-DDR2", 94 [MEM_DDR2] = "Unbuffered-DDR2",
95 [MEM_FB_DDR2] = "FullyBuffered-DDR2", 95 [MEM_FB_DDR2] = "FullyBuffered-DDR2",
96 [MEM_RDDR2] = "Registered-DDR2", 96 [MEM_RDDR2] = "Registered-DDR2",
97 [MEM_XDR] = "XDR" 97 [MEM_XDR] = "XDR",
98 [MEM_DDR3] = "Unbuffered-DDR3",
99 [MEM_RDDR3] = "Registered-DDR3"
98}; 100};
99 101
100static const char *dev_types[] = { 102static const char *dev_types[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7c8c2d72916f..3f2ccfc6407c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
757 case DSC_SDTYPE_DDR2: 757 case DSC_SDTYPE_DDR2:
758 mtype = MEM_RDDR2; 758 mtype = MEM_RDDR2;
759 break; 759 break;
760 case DSC_SDTYPE_DDR3:
761 mtype = MEM_RDDR3;
762 break;
760 default: 763 default:
761 mtype = MEM_UNKNOWN; 764 mtype = MEM_UNKNOWN;
762 break; 765 break;
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
769 case DSC_SDTYPE_DDR2: 772 case DSC_SDTYPE_DDR2:
770 mtype = MEM_DDR2; 773 mtype = MEM_DDR2;
771 break; 774 break;
775 case DSC_SDTYPE_DDR3:
776 mtype = MEM_DDR3;
777 break;
772 default: 778 default:
773 mtype = MEM_UNKNOWN; 779 mtype = MEM_UNKNOWN;
774 break; 780 break;
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 135b3539a030..52432ee7c4b9 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -53,6 +53,7 @@
53 53
54#define DSC_SDTYPE_DDR 0x02000000 54#define DSC_SDTYPE_DDR 0x02000000
55#define DSC_SDTYPE_DDR2 0x03000000 55#define DSC_SDTYPE_DDR2 0x03000000
56#define DSC_SDTYPE_DDR3 0x07000000
56#define DSC_X32_EN 0x00000020 57#define DSC_X32_EN 0x00000020
57 58
58/* Err_Int_En */ 59/* Err_Int_En */
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 2406c2ce2844..d4ec60593176 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -30,7 +30,7 @@
30/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */ 30/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
31 31
32#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ 32#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
33#define X38_MCHBAR_HIGH 0x4b 33#define X38_MCHBAR_HIGH 0x4c
34#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ 34#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
35#define X38_MMR_WINDOW_SIZE 16384 35#define X38_MMR_WINDOW_SIZE 16384
36 36
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 543fccac81bb..f74edae5cb4c 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -196,8 +196,8 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
196{ 196{
197 int channel, bandwidth = 0; 197 int channel, bandwidth = 0;
198 198
199 fw_iso_resource_manage(card, generation, 1ULL << 31, 199 fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
200 &channel, &bandwidth, true); 200 &bandwidth, true, card->bm_transaction_data);
201 if (channel == 31) { 201 if (channel == 31) {
202 card->broadcast_channel_allocated = true; 202 card->broadcast_channel_allocated = true;
203 device_for_each_child(card->device, (void *)(long)generation, 203 device_for_each_child(card->device, (void *)(long)generation,
@@ -230,7 +230,6 @@ static void fw_card_bm_work(struct work_struct *work)
230 bool do_reset = false; 230 bool do_reset = false;
231 bool root_device_is_running; 231 bool root_device_is_running;
232 bool root_device_is_cmc; 232 bool root_device_is_cmc;
233 __be32 lock_data[2];
234 233
235 spin_lock_irqsave(&card->lock, flags); 234 spin_lock_irqsave(&card->lock, flags);
236 235
@@ -273,22 +272,23 @@ static void fw_card_bm_work(struct work_struct *work)
273 goto pick_me; 272 goto pick_me;
274 } 273 }
275 274
276 lock_data[0] = cpu_to_be32(0x3f); 275 card->bm_transaction_data[0] = cpu_to_be32(0x3f);
277 lock_data[1] = cpu_to_be32(local_id); 276 card->bm_transaction_data[1] = cpu_to_be32(local_id);
278 277
279 spin_unlock_irqrestore(&card->lock, flags); 278 spin_unlock_irqrestore(&card->lock, flags);
280 279
281 rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 280 rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
282 irm_id, generation, SCODE_100, 281 irm_id, generation, SCODE_100,
283 CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, 282 CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
284 lock_data, sizeof(lock_data)); 283 card->bm_transaction_data,
284 sizeof(card->bm_transaction_data));
285 285
286 if (rcode == RCODE_GENERATION) 286 if (rcode == RCODE_GENERATION)
287 /* Another bus reset, BM work has been rescheduled. */ 287 /* Another bus reset, BM work has been rescheduled. */
288 goto out; 288 goto out;
289 289
290 if (rcode == RCODE_COMPLETE && 290 if (rcode == RCODE_COMPLETE &&
291 lock_data[0] != cpu_to_be32(0x3f)) { 291 card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
292 292
293 /* Somebody else is BM. Only act as IRM. */ 293 /* Somebody else is BM. Only act as IRM. */
294 if (local_id == irm_id) 294 if (local_id == irm_id)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index d1d30c615b0f..ced186d7e9a9 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -125,6 +125,7 @@ struct iso_resource {
125 int generation; 125 int generation;
126 u64 channels; 126 u64 channels;
127 s32 bandwidth; 127 s32 bandwidth;
128 __be32 transaction_data[2];
128 struct iso_resource_event *e_alloc, *e_dealloc; 129 struct iso_resource_event *e_alloc, *e_dealloc;
129}; 130};
130 131
@@ -1049,7 +1050,8 @@ static void iso_resource_work(struct work_struct *work)
1049 r->channels, &channel, &bandwidth, 1050 r->channels, &channel, &bandwidth,
1050 todo == ISO_RES_ALLOC || 1051 todo == ISO_RES_ALLOC ||
1051 todo == ISO_RES_REALLOC || 1052 todo == ISO_RES_REALLOC ||
1052 todo == ISO_RES_ALLOC_ONCE); 1053 todo == ISO_RES_ALLOC_ONCE,
1054 r->transaction_data);
1053 /* 1055 /*
1054 * Is this generation outdated already? As long as this resource sticks 1056 * Is this generation outdated already? As long as this resource sticks
1055 * in the idr, it will be scheduled again for a newer generation or at 1057 * in the idr, it will be scheduled again for a newer generation or at
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 166f19c6d38d..110e731f5574 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -177,9 +177,8 @@ EXPORT_SYMBOL(fw_iso_context_stop);
177 */ 177 */
178 178
179static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, 179static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
180 int bandwidth, bool allocate) 180 int bandwidth, bool allocate, __be32 data[2])
181{ 181{
182 __be32 data[2];
183 int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; 182 int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
184 183
185 /* 184 /*
@@ -215,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
215} 214}
216 215
217static int manage_channel(struct fw_card *card, int irm_id, int generation, 216static int manage_channel(struct fw_card *card, int irm_id, int generation,
218 u32 channels_mask, u64 offset, bool allocate) 217 u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
219{ 218{
220 __be32 data[2], c, all, old; 219 __be32 c, all, old;
221 int i, retry = 5; 220 int i, retry = 5;
222 221
223 old = all = allocate ? cpu_to_be32(~0) : 0; 222 old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -260,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
260} 259}
261 260
262static void deallocate_channel(struct fw_card *card, int irm_id, 261static void deallocate_channel(struct fw_card *card, int irm_id,
263 int generation, int channel) 262 int generation, int channel, __be32 buffer[2])
264{ 263{
265 u32 mask; 264 u32 mask;
266 u64 offset; 265 u64 offset;
@@ -269,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
269 offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : 268 offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
270 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; 269 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
271 270
272 manage_channel(card, irm_id, generation, mask, offset, false); 271 manage_channel(card, irm_id, generation, mask, offset, false, buffer);
273} 272}
274 273
275/** 274/**
@@ -298,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
298 */ 297 */
299void fw_iso_resource_manage(struct fw_card *card, int generation, 298void fw_iso_resource_manage(struct fw_card *card, int generation,
300 u64 channels_mask, int *channel, int *bandwidth, 299 u64 channels_mask, int *channel, int *bandwidth,
301 bool allocate) 300 bool allocate, __be32 buffer[2])
302{ 301{
303 u32 channels_hi = channels_mask; /* channels 31...0 */ 302 u32 channels_hi = channels_mask; /* channels 31...0 */
304 u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ 303 u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
@@ -310,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
310 309
311 if (channels_hi) 310 if (channels_hi)
312 c = manage_channel(card, irm_id, generation, channels_hi, 311 c = manage_channel(card, irm_id, generation, channels_hi,
313 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); 312 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
313 allocate, buffer);
314 if (channels_lo && c < 0) { 314 if (channels_lo && c < 0) {
315 c = manage_channel(card, irm_id, generation, channels_lo, 315 c = manage_channel(card, irm_id, generation, channels_lo,
316 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); 316 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
317 allocate, buffer);
317 if (c >= 0) 318 if (c >= 0)
318 c += 32; 319 c += 32;
319 } 320 }
@@ -325,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
325 if (*bandwidth == 0) 326 if (*bandwidth == 0)
326 return; 327 return;
327 328
328 ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); 329 ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
330 allocate, buffer);
329 if (ret < 0) 331 if (ret < 0)
330 *bandwidth = 0; 332 *bandwidth = 0;
331 333
332 if (allocate && ret < 0 && c >= 0) { 334 if (allocate && ret < 0 && c >= 0) {
333 deallocate_channel(card, irm_id, generation, c); 335 deallocate_channel(card, irm_id, generation, c, buffer);
334 *channel = ret; 336 *channel = ret;
335 } 337 }
336} 338}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c3cfc647e5e3..6052816be353 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -120,7 +120,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
120 120
121int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); 121int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
122void fw_iso_resource_manage(struct fw_card *card, int generation, 122void fw_iso_resource_manage(struct fw_card *card, int generation,
123 u64 channels_mask, int *channel, int *bandwidth, bool allocate); 123 u64 channels_mask, int *channel, int *bandwidth,
124 bool allocate, __be32 buffer[2]);
124 125
125 126
126/* -topology */ 127/* -topology */
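
The firewire changes above thread a caller-owned __be32 buffer[2] through fw_iso_resource_manage(), manage_channel() and manage_bandwidth() instead of declaring the lock payload locally; the scratch quadlets now live in long-lived structures (card->bm_transaction_data, r->transaction_data), apparently so the buffer handed to the transaction machinery is never a stack address. A minimal model of that API shape; struct card, manage_resource() and the field names below are simplified stand-ins, not the firewire core API.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t be32;   /* stand-in for the kernel's __be32 */

struct card {
	be32 bm_transaction_data[2]; /* owned by the card, not by a stack frame */
};

/* Before the change, a helper like this would declare "be32 data[2]"
 * locally; now the caller supplies storage that outlives the call. */
static void manage_resource(be32 data[2], be32 arg, be32 val)
{
	data[0] = arg;
	data[1] = val;
	/* ...data[] would now be handed to the asynchronous transaction layer... */
}

int main(void)
{
	struct card c;

	manage_resource(c.bm_transaction_data, 0x3f, 0x01);
	printf("%u %u\n", (unsigned)c.bm_transaction_data[0],
	       (unsigned)c.bm_transaction_data[1]);
	return 0;
}
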
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 24c45635376a..8d51568ee143 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -201,6 +201,12 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
201#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ 201#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
202 202
203/* 203/*
204 * There is no transport protocol limit to the CDB length, but we implement
205 * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
206 */
207#define SBP2_MAX_CDB_SIZE 16
208
209/*
204 * The default maximum s/g segment size of a FireWire controller is 210 * The default maximum s/g segment size of a FireWire controller is
205 * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to 211 * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
206 * be quadlet-aligned, we set the length limit to 0xffff & ~3. 212 * be quadlet-aligned, we set the length limit to 0xffff & ~3.
@@ -312,7 +318,7 @@ struct sbp2_command_orb {
312 struct sbp2_pointer next; 318 struct sbp2_pointer next;
313 struct sbp2_pointer data_descriptor; 319 struct sbp2_pointer data_descriptor;
314 __be32 misc; 320 __be32 misc;
315 u8 command_block[12]; 321 u8 command_block[SBP2_MAX_CDB_SIZE];
316 } request; 322 } request;
317 struct scsi_cmnd *cmd; 323 struct scsi_cmnd *cmd;
318 scsi_done_fn_t done; 324 scsi_done_fn_t done;
@@ -1146,6 +1152,8 @@ static int sbp2_probe(struct device *dev)
1146 if (fw_device_enable_phys_dma(device) < 0) 1152 if (fw_device_enable_phys_dma(device) < 0)
1147 goto fail_shost_put; 1153 goto fail_shost_put;
1148 1154
1155 shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
1156
1149 if (scsi_add_host(shost, &unit->device) < 0) 1157 if (scsi_add_host(shost, &unit->device) < 0)
1150 goto fail_shost_put; 1158 goto fail_shost_put;
1151 1159
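
The sbp2 hunks above grow the ORB command block from 12 bytes to a named 16-byte limit and advertise the same value to the SCSI layer via shost->max_cmd_len, so any CDB the midlayer issues also fits the ORB. A simplified model of that sizing contract; the struct and queue_command() below are illustrative, not the driver's data structures.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SBP2_MAX_CDB_SIZE 16    /* 16-byte CDBs address disks larger than 2 TB */

struct command_orb_model {
	uint8_t command_block[SBP2_MAX_CDB_SIZE];
};

static int queue_command(struct command_orb_model *orb,
			 const uint8_t *cdb, size_t cdb_len)
{
	if (cdb_len > sizeof(orb->command_block))
		return -1;              /* would have overflowed the old 12-byte array */
	memset(orb->command_block, 0, sizeof(orb->command_block));
	memcpy(orb->command_block, cdb, cdb_len);
	return 0;
}

int main(void)
{
	struct command_orb_model orb;
	uint8_t read16[16] = { 0x88 };  /* READ(16) opcode, remaining bytes zeroed */

	printf("queued: %d\n", queue_command(&orb, read16, sizeof(read16)));
	return 0;
}
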
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3582c39f9725..96dda81c9228 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -79,6 +79,12 @@ config GPIO_XILINX
79 help 79 help
80 Say yes here to support the Xilinx FPGA GPIO device 80 Say yes here to support the Xilinx FPGA GPIO device
81 81
82config GPIO_VR41XX
 83 tristate "NEC VR4100 series General-purpose I/O Unit support"
84 depends on CPU_VR41XX
85 help
 86 Say yes here to support the NEC VR4100 series General-purpose I/O Unit
87
82comment "I2C GPIO expanders:" 88comment "I2C GPIO expanders:"
83 89
84config GPIO_MAX732X 90config GPIO_MAX732X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ef90203e8f3c..9244c6fcd8be 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_GPIO_PL061) += pl061.o
13obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o 13obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
14obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o 14obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
15obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o 15obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
16obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index aa8e7cb020d9..4ee4c8367a3f 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
109 writeb(!!value << offset, chip->base + (1 << (offset + 2))); 109 writeb(!!value << offset, chip->base + (1 << (offset + 2)));
110} 110}
111 111
112static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
113{
114 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
115
116 if (chip->irq_base == (unsigned) -1)
117 return -EINVAL;
118
119 return chip->irq_base + offset;
120}
121
112/* 122/*
113 * PL061 GPIO IRQ 123 * PL061 GPIO IRQ
114 */ 124 */
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
200 desc->chip->ack(irq); 210 desc->chip->ack(irq);
201 list_for_each(ptr, chip_list) { 211 list_for_each(ptr, chip_list) {
202 unsigned long pending; 212 unsigned long pending;
203 int gpio; 213 int offset;
204 214
205 chip = list_entry(ptr, struct pl061_gpio, list); 215 chip = list_entry(ptr, struct pl061_gpio, list);
206 pending = readb(chip->base + GPIOMIS); 216 pending = readb(chip->base + GPIOMIS);
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
209 if (pending == 0) 219 if (pending == 0)
210 continue; 220 continue;
211 221
212 for_each_bit(gpio, &pending, PL061_GPIO_NR) 222 for_each_bit(offset, &pending, PL061_GPIO_NR)
213 generic_handle_irq(gpio_to_irq(gpio)); 223 generic_handle_irq(pl061_to_irq(&chip->gc, offset));
214 } 224 }
215 desc->chip->unmask(irq); 225 desc->chip->unmask(irq);
216} 226}
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
221 struct pl061_gpio *chip; 231 struct pl061_gpio *chip;
222 struct list_head *chip_list; 232 struct list_head *chip_list;
223 int ret, irq, i; 233 int ret, irq, i;
224 static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)]; 234 static DECLARE_BITMAP(init_irq, NR_IRQS);
225 235
226 pdata = dev->dev.platform_data; 236 pdata = dev->dev.platform_data;
227 if (pdata == NULL) 237 if (pdata == NULL)
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
251 chip->gc.direction_output = pl061_direction_output; 261 chip->gc.direction_output = pl061_direction_output;
252 chip->gc.get = pl061_get_value; 262 chip->gc.get = pl061_get_value;
253 chip->gc.set = pl061_set_value; 263 chip->gc.set = pl061_set_value;
264 chip->gc.to_irq = pl061_to_irq;
254 chip->gc.base = pdata->gpio_base; 265 chip->gc.base = pdata->gpio_base;
255 chip->gc.ngpio = PL061_GPIO_NR; 266 chip->gc.ngpio = PL061_GPIO_NR;
256 chip->gc.label = dev_name(&dev->dev); 267 chip->gc.label = dev_name(&dev->dev);
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
280 if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */ 291 if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
281 chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL); 292 chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
282 if (chip_list == NULL) { 293 if (chip_list == NULL) {
294 clear_bit(irq, init_irq);
283 ret = -ENOMEM; 295 ret = -ENOMEM;
284 goto iounmap; 296 goto iounmap;
285 } 297 }
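
The pl061 change above adds a gpio_chip .to_irq hook so the demux handler can translate a GPIO offset into an interrupt number from the chip's own irq_base, rather than going through the global gpio_to_irq() mapping. A small standalone model of that translation; the struct and the (unsigned)-1 "no IRQ" convention mirror the patch but the code is otherwise illustrative.

#include <stdio.h>

struct gpio_chip_model {
	unsigned int irq_base;   /* (unsigned)-1 if the chip has no IRQ support */
	unsigned int ngpio;
};

static int to_irq(const struct gpio_chip_model *gc, unsigned int offset)
{
	if (gc->irq_base == (unsigned int)-1 || offset >= gc->ngpio)
		return -1;                       /* -EINVAL in the kernel */
	return (int)(gc->irq_base + offset);
}

int main(void)
{
	struct gpio_chip_model gc = { .irq_base = 64, .ngpio = 8 };

	printf("gpio 3 -> irq %d\n", to_irq(&gc, 3));
	printf("gpio 9 -> irq %d\n", to_irq(&gc, 9));
	return 0;
}
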
diff --git a/drivers/char/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
index 54c837288d19..b16c9a8c03f5 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/gpio/vr41xx_giu.c
@@ -2,8 +2,8 @@
2 * Driver for NEC VR4100 series General-purpose I/O Unit. 2 * Driver for NEC VR4100 series General-purpose I/O Unit.
3 * 3 *
4 * Copyright (C) 2002 MontaVista Software Inc. 4 * Copyright (C) 2002 MontaVista Software Inc.
5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com> 5 * Author: Yoichi Yuasa <source@mvista.com>
6 * Copyright (C) 2003-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 6 * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -21,29 +21,25 @@
21 */ 21 */
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <linux/gpio.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/io.h>
26#include <linux/irq.h> 28#include <linux/irq.h>
27#include <linux/kernel.h> 29#include <linux/kernel.h>
28#include <linux/module.h> 30#include <linux/module.h>
29#include <linux/platform_device.h> 31#include <linux/platform_device.h>
30#include <linux/smp_lock.h>
31#include <linux/spinlock.h> 32#include <linux/spinlock.h>
32#include <linux/types.h> 33#include <linux/types.h>
33 34
34#include <asm/io.h>
35#include <asm/vr41xx/giu.h> 35#include <asm/vr41xx/giu.h>
36#include <asm/vr41xx/irq.h> 36#include <asm/vr41xx/irq.h>
37#include <asm/vr41xx/vr41xx.h> 37#include <asm/vr41xx/vr41xx.h>
38 38
39MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 39MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
40MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver"); 40MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
41MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
42 42
43static int major; /* default is dynamic major device number */
44module_param(major, int, 0);
45MODULE_PARM_DESC(major, "Major device number");
46
47#define GIUIOSELL 0x00 43#define GIUIOSELL 0x00
48#define GIUIOSELH 0x02 44#define GIUIOSELH 0x02
49#define GIUPIODL 0x04 45#define GIUPIODL 0x04
@@ -76,9 +72,13 @@ MODULE_PARM_DESC(major, "Major device number");
76#define GPIO_HAS_OUTPUT_ENABLE 0x0002 72#define GPIO_HAS_OUTPUT_ENABLE 0x0002
77#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100 73#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
78 74
79static spinlock_t giu_lock; 75enum {
76 GPIO_INPUT,
77 GPIO_OUTPUT,
78};
79
80static DEFINE_SPINLOCK(giu_lock);
80static unsigned long giu_flags; 81static unsigned long giu_flags;
81static unsigned int giu_nr_pins;
82 82
83static void __iomem *giu_base; 83static void __iomem *giu_base;
84 84
@@ -89,9 +89,9 @@ static void __iomem *giu_base;
89#define GIUINT_HIGH_OFFSET 16 89#define GIUINT_HIGH_OFFSET 16
90#define GIUINT_HIGH_MAX 32 90#define GIUINT_HIGH_MAX 32
91 91
92static inline uint16_t giu_set(uint16_t offset, uint16_t set) 92static inline u16 giu_set(u16 offset, u16 set)
93{ 93{
94 uint16_t data; 94 u16 data;
95 95
96 data = giu_read(offset); 96 data = giu_read(offset);
97 data |= set; 97 data |= set;
@@ -100,9 +100,9 @@ static inline uint16_t giu_set(uint16_t offset, uint16_t set)
100 return data; 100 return data;
101} 101}
102 102
103static inline uint16_t giu_clear(uint16_t offset, uint16_t clear) 103static inline u16 giu_clear(u16 offset, u16 clear)
104{ 104{
105 uint16_t data; 105 u16 data;
106 106
107 data = giu_read(offset); 107 data = giu_read(offset);
108 data &= ~clear; 108 data &= ~clear;
@@ -145,7 +145,8 @@ static struct irq_chip giuint_low_irq_chip = {
145 145
146static void ack_giuint_high(unsigned int irq) 146static void ack_giuint_high(unsigned int irq)
147{ 147{
148 giu_write(GIUINTSTATH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET)); 148 giu_write(GIUINTSTATH,
149 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
149} 150}
150 151
151static void mask_giuint_high(unsigned int irq) 152static void mask_giuint_high(unsigned int irq)
@@ -177,7 +178,7 @@ static struct irq_chip giuint_high_irq_chip = {
177 178
178static int giu_get_irq(unsigned int irq) 179static int giu_get_irq(unsigned int irq)
179{ 180{
180 uint16_t pendl, pendh, maskl, maskh; 181 u16 pendl, pendh, maskl, maskh;
181 int i; 182 int i;
182 183
183 pendl = giu_read(GIUINTSTATL); 184 pendl = giu_read(GIUINTSTATL);
@@ -208,14 +209,15 @@ static int giu_get_irq(unsigned int irq)
208 return -EINVAL; 209 return -EINVAL;
209} 210}
210 211
211void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_t signal) 212void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
213 irq_signal_t signal)
212{ 214{
213 uint16_t mask; 215 u16 mask;
214 216
215 if (pin < GIUINT_HIGH_OFFSET) { 217 if (pin < GIUINT_HIGH_OFFSET) {
216 mask = 1 << pin; 218 mask = 1 << pin;
217 if (trigger != IRQ_TRIGGER_LEVEL) { 219 if (trigger != IRQ_TRIGGER_LEVEL) {
218 giu_set(GIUINTTYPL, mask); 220 giu_set(GIUINTTYPL, mask);
219 if (signal == IRQ_SIGNAL_HOLD) 221 if (signal == IRQ_SIGNAL_HOLD)
220 giu_set(GIUINTHTSELL, mask); 222 giu_set(GIUINTHTSELL, mask);
221 else 223 else
@@ -237,14 +239,14 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_
237 } 239 }
238 } 240 }
239 set_irq_chip_and_handler(GIU_IRQ(pin), 241 set_irq_chip_and_handler(GIU_IRQ(pin),
240 &giuint_low_irq_chip, 242 &giuint_low_irq_chip,
241 handle_edge_irq); 243 handle_edge_irq);
242 } else { 244 } else {
243 giu_clear(GIUINTTYPL, mask); 245 giu_clear(GIUINTTYPL, mask);
244 giu_clear(GIUINTHTSELL, mask); 246 giu_clear(GIUINTHTSELL, mask);
245 set_irq_chip_and_handler(GIU_IRQ(pin), 247 set_irq_chip_and_handler(GIU_IRQ(pin),
246 &giuint_low_irq_chip, 248 &giuint_low_irq_chip,
247 handle_level_irq); 249 handle_level_irq);
248 } 250 }
249 giu_write(GIUINTSTATL, mask); 251 giu_write(GIUINTSTATL, mask);
250 } else if (pin < GIUINT_HIGH_MAX) { 252 } else if (pin < GIUINT_HIGH_MAX) {
@@ -272,14 +274,14 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_
272 } 274 }
273 } 275 }
274 set_irq_chip_and_handler(GIU_IRQ(pin), 276 set_irq_chip_and_handler(GIU_IRQ(pin),
275 &giuint_high_irq_chip, 277 &giuint_high_irq_chip,
276 handle_edge_irq); 278 handle_edge_irq);
277 } else { 279 } else {
278 giu_clear(GIUINTTYPH, mask); 280 giu_clear(GIUINTTYPH, mask);
279 giu_clear(GIUINTHTSELH, mask); 281 giu_clear(GIUINTHTSELH, mask);
280 set_irq_chip_and_handler(GIU_IRQ(pin), 282 set_irq_chip_and_handler(GIU_IRQ(pin),
281 &giuint_high_irq_chip, 283 &giuint_high_irq_chip,
282 handle_level_irq); 284 handle_level_irq);
283 } 285 }
284 giu_write(GIUINTSTATH, mask); 286 giu_write(GIUINTSTATH, mask);
285 } 287 }
@@ -288,7 +290,7 @@ EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
288 290
289void vr41xx_set_irq_level(unsigned int pin, irq_level_t level) 291void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
290{ 292{
291 uint16_t mask; 293 u16 mask;
292 294
293 if (pin < GIUINT_HIGH_OFFSET) { 295 if (pin < GIUINT_HIGH_OFFSET) {
294 mask = 1 << pin; 296 mask = 1 << pin;
@@ -308,89 +310,24 @@ void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
308} 310}
309EXPORT_SYMBOL_GPL(vr41xx_set_irq_level); 311EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
310 312
311gpio_data_t vr41xx_gpio_get_pin(unsigned int pin) 313static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
312{
313 uint16_t reg, mask;
314
315 if (pin >= giu_nr_pins)
316 return GPIO_DATA_INVAL;
317
318 if (pin < 16) {
319 reg = giu_read(GIUPIODL);
320 mask = (uint16_t)1 << pin;
321 } else if (pin < 32) {
322 reg = giu_read(GIUPIODH);
323 mask = (uint16_t)1 << (pin - 16);
324 } else if (pin < 48) {
325 reg = giu_read(GIUPODATL);
326 mask = (uint16_t)1 << (pin - 32);
327 } else {
328 reg = giu_read(GIUPODATH);
329 mask = (uint16_t)1 << (pin - 48);
330 }
331
332 if (reg & mask)
333 return GPIO_DATA_HIGH;
334
335 return GPIO_DATA_LOW;
336}
337EXPORT_SYMBOL_GPL(vr41xx_gpio_get_pin);
338
339int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data)
340{
341 uint16_t offset, mask, reg;
342 unsigned long flags;
343
344 if (pin >= giu_nr_pins)
345 return -EINVAL;
346
347 if (pin < 16) {
348 offset = GIUPIODL;
349 mask = (uint16_t)1 << pin;
350 } else if (pin < 32) {
351 offset = GIUPIODH;
352 mask = (uint16_t)1 << (pin - 16);
353 } else if (pin < 48) {
354 offset = GIUPODATL;
355 mask = (uint16_t)1 << (pin - 32);
356 } else {
357 offset = GIUPODATH;
358 mask = (uint16_t)1 << (pin - 48);
359 }
360
361 spin_lock_irqsave(&giu_lock, flags);
362
363 reg = giu_read(offset);
364 if (data == GPIO_DATA_HIGH)
365 reg |= mask;
366 else
367 reg &= ~mask;
368 giu_write(offset, reg);
369
370 spin_unlock_irqrestore(&giu_lock, flags);
371
372 return 0;
373}
374EXPORT_SYMBOL_GPL(vr41xx_gpio_set_pin);
375
376int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir)
377{ 314{
378 uint16_t offset, mask, reg; 315 u16 offset, mask, reg;
379 unsigned long flags; 316 unsigned long flags;
380 317
381 if (pin >= giu_nr_pins) 318 if (pin >= chip->ngpio)
382 return -EINVAL; 319 return -EINVAL;
383 320
384 if (pin < 16) { 321 if (pin < 16) {
385 offset = GIUIOSELL; 322 offset = GIUIOSELL;
386 mask = (uint16_t)1 << pin; 323 mask = 1 << pin;
387 } else if (pin < 32) { 324 } else if (pin < 32) {
388 offset = GIUIOSELH; 325 offset = GIUIOSELH;
389 mask = (uint16_t)1 << (pin - 16); 326 mask = 1 << (pin - 16);
390 } else { 327 } else {
391 if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) { 328 if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
392 offset = GIUPODATEN; 329 offset = GIUPODATEN;
393 mask = (uint16_t)1 << (pin - 32); 330 mask = 1 << (pin - 32);
394 } else { 331 } else {
395 switch (pin) { 332 switch (pin) {
396 case 48: 333 case 48:
@@ -420,11 +357,10 @@ int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir)
420 357
421 return 0; 358 return 0;
422} 359}
423EXPORT_SYMBOL_GPL(vr41xx_gpio_set_direction);
424 360
425int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull) 361int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
426{ 362{
427 uint16_t reg, mask; 363 u16 reg, mask;
428 unsigned long flags; 364 unsigned long flags;
429 365
430 if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO) 366 if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
@@ -433,7 +369,7 @@ int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
433 if (pin >= 15) 369 if (pin >= 15)
434 return -EINVAL; 370 return -EINVAL;
435 371
436 mask = (uint16_t)1 << pin; 372 mask = 1 << pin;
437 373
438 spin_lock_irqsave(&giu_lock, flags); 374 spin_lock_irqsave(&giu_lock, flags);
439 375
@@ -460,146 +396,125 @@ int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
460} 396}
461EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown); 397EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
462 398
463static ssize_t gpio_read(struct file *file, char __user *buf, size_t len, 399static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
464 loff_t *ppos)
465{ 400{
466 unsigned int pin; 401 u16 reg, mask;
467 char value = '0';
468 402
469 pin = iminor(file->f_path.dentry->d_inode); 403 if (pin >= chip->ngpio)
470 if (pin >= giu_nr_pins) 404 return -EINVAL;
471 return -EBADF;
472
473 if (vr41xx_gpio_get_pin(pin) == GPIO_DATA_HIGH)
474 value = '1';
475 405
476 if (len <= 0) 406 if (pin < 16) {
477 return -EFAULT; 407 reg = giu_read(GIUPIODL);
408 mask = 1 << pin;
409 } else if (pin < 32) {
410 reg = giu_read(GIUPIODH);
411 mask = 1 << (pin - 16);
412 } else if (pin < 48) {
413 reg = giu_read(GIUPODATL);
414 mask = 1 << (pin - 32);
415 } else {
416 reg = giu_read(GIUPODATH);
417 mask = 1 << (pin - 48);
418 }
478 419
479 if (put_user(value, buf)) 420 if (reg & mask)
480 return -EFAULT; 421 return 1;
481 422
482 return 1; 423 return 0;
483} 424}
484 425
485static ssize_t gpio_write(struct file *file, const char __user *data, 426static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
486 size_t len, loff_t *ppos) 427 int value)
487{ 428{
488 unsigned int pin; 429 u16 offset, mask, reg;
489 size_t i; 430 unsigned long flags;
490 char c; 431
491 int retval = 0; 432 if (pin >= chip->ngpio)
492 433 return;
493 pin = iminor(file->f_path.dentry->d_inode);
494 if (pin >= giu_nr_pins)
495 return -EBADF;
496
497 for (i = 0; i < len; i++) {
498 if (get_user(c, data + i))
499 return -EFAULT;
500
501 switch (c) {
502 case '0':
503 retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_LOW);
504 break;
505 case '1':
506 retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_HIGH);
507 break;
508 case 'D':
509 printk(KERN_INFO "GPIO%d: pull down\n", pin);
510 retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DOWN);
511 break;
512 case 'd':
513 printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin);
514 retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE);
515 break;
516 case 'I':
517 printk(KERN_INFO "GPIO%d: input\n", pin);
518 retval = vr41xx_gpio_set_direction(pin, GPIO_INPUT);
519 break;
520 case 'O':
521 printk(KERN_INFO "GPIO%d: output\n", pin);
522 retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT);
523 break;
524 case 'o':
525 printk(KERN_INFO "GPIO%d: output disable\n", pin);
526 retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT_DISABLE);
527 break;
528 case 'P':
529 printk(KERN_INFO "GPIO%d: pull up\n", pin);
530 retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_UP);
531 break;
532 case 'p':
533 printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin);
534 retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE);
535 break;
536 default:
537 break;
538 }
539 434
540 if (retval < 0) 435 if (pin < 16) {
541 break; 436 offset = GIUPIODL;
437 mask = 1 << pin;
438 } else if (pin < 32) {
439 offset = GIUPIODH;
440 mask = 1 << (pin - 16);
441 } else if (pin < 48) {
442 offset = GIUPODATL;
443 mask = 1 << (pin - 32);
444 } else {
445 offset = GIUPODATH;
446 mask = 1 << (pin - 48);
542 } 447 }
543 448
544 return i; 449 spin_lock_irqsave(&giu_lock, flags);
450
451 reg = giu_read(offset);
452 if (value)
453 reg |= mask;
454 else
455 reg &= ~mask;
456 giu_write(offset, reg);
457
458 spin_unlock_irqrestore(&giu_lock, flags);
545} 459}
546 460
547static int gpio_open(struct inode *inode, struct file *file) 461
462static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
548{ 463{
549 unsigned int pin; 464 return giu_set_direction(chip, offset, GPIO_INPUT);
465}
550 466
551 cycle_kernel_lock(); 467static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
552 pin = iminor(inode); 468 int value)
553 if (pin >= giu_nr_pins) 469{
554 return -EBADF; 470 vr41xx_gpio_set(chip, offset, value);
555 471
556 return nonseekable_open(inode, file); 472 return giu_set_direction(chip, offset, GPIO_OUTPUT);
557} 473}
558 474
559static int gpio_release(struct inode *inode, struct file *file) 475static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
560{ 476{
561 unsigned int pin; 477 if (offset >= chip->ngpio)
562 478 return -EINVAL;
563 pin = iminor(inode);
564 if (pin >= giu_nr_pins)
565 return -EBADF;
566 479
567 return 0; 480 return GIU_IRQ_BASE + offset;
568} 481}
569 482
570static const struct file_operations gpio_fops = { 483static struct gpio_chip vr41xx_gpio_chip = {
571 .owner = THIS_MODULE, 484 .label = "vr41xx",
572 .read = gpio_read, 485 .owner = THIS_MODULE,
573 .write = gpio_write, 486 .direction_input = vr41xx_gpio_direction_input,
574 .open = gpio_open, 487 .get = vr41xx_gpio_get,
575 .release = gpio_release, 488 .direction_output = vr41xx_gpio_direction_output,
489 .set = vr41xx_gpio_set,
490 .to_irq = vr41xx_gpio_to_irq,
576}; 491};
577 492
578static int __devinit giu_probe(struct platform_device *dev) 493static int __devinit giu_probe(struct platform_device *pdev)
579{ 494{
580 struct resource *res; 495 struct resource *res;
581 unsigned int trigger, i, pin; 496 unsigned int trigger, i, pin;
582 struct irq_chip *chip; 497 struct irq_chip *chip;
583 int irq, retval; 498 int irq, retval;
584 499
585 switch (dev->id) { 500 switch (pdev->id) {
586 case GPIO_50PINS_PULLUPDOWN: 501 case GPIO_50PINS_PULLUPDOWN:
587 giu_flags = GPIO_HAS_PULLUPDOWN_IO; 502 giu_flags = GPIO_HAS_PULLUPDOWN_IO;
588 giu_nr_pins = 50; 503 vr41xx_gpio_chip.ngpio = 50;
589 break; 504 break;
590 case GPIO_36PINS: 505 case GPIO_36PINS:
591 giu_nr_pins = 36; 506 vr41xx_gpio_chip.ngpio = 36;
592 break; 507 break;
593 case GPIO_48PINS_EDGE_SELECT: 508 case GPIO_48PINS_EDGE_SELECT:
594 giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT; 509 giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
595 giu_nr_pins = 48; 510 vr41xx_gpio_chip.ngpio = 48;
596 break; 511 break;
597 default: 512 default:
598 printk(KERN_ERR "GIU: unknown ID %d\n", dev->id); 513 dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
599 return -ENODEV; 514 return -ENODEV;
600 } 515 }
601 516
602 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 517 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
603 if (!res) 518 if (!res)
604 return -EBUSY; 519 return -EBUSY;
605 520
@@ -607,19 +522,9 @@ static int __devinit giu_probe(struct platform_device *dev)
607 if (!giu_base) 522 if (!giu_base)
608 return -ENOMEM; 523 return -ENOMEM;
609 524
610 retval = register_chrdev(major, "GIU", &gpio_fops); 525 vr41xx_gpio_chip.dev = &pdev->dev;
611 if (retval < 0) {
612 iounmap(giu_base);
613 giu_base = NULL;
614 return retval;
615 }
616
617 if (major == 0) {
618 major = retval;
619 printk(KERN_INFO "GIU: major number %d\n", major);
620 }
621 526
622 spin_lock_init(&giu_lock); 527 retval = gpiochip_add(&vr41xx_gpio_chip);
623 528
624 giu_write(GIUINTENL, 0); 529 giu_write(GIUINTENL, 0);
625 giu_write(GIUINTENH, 0); 530 giu_write(GIUINTENH, 0);
@@ -640,14 +545,14 @@ static int __devinit giu_probe(struct platform_device *dev)
640 545
641 } 546 }
642 547
643 irq = platform_get_irq(dev, 0); 548 irq = platform_get_irq(pdev, 0);
644 if (irq < 0 || irq >= nr_irqs) 549 if (irq < 0 || irq >= nr_irqs)
645 return -EBUSY; 550 return -EBUSY;
646 551
647 return cascade_irq(irq, giu_get_irq); 552 return cascade_irq(irq, giu_get_irq);
648} 553}
649 554
650static int __devexit giu_remove(struct platform_device *dev) 555static int __devexit giu_remove(struct platform_device *pdev)
651{ 556{
652 if (giu_base) { 557 if (giu_base) {
653 iounmap(giu_base); 558 iounmap(giu_base);
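Note on the hunks above: the driver's private character-device GPIO interface is replaced by the generic gpiolib framework, so the pin accessors become gpio_chip callbacks and the chip is registered with gpiochip_add(). What follows is only a hedged, minimal sketch of that registration pattern; every my_* name and MY_NGPIO is an illustrative placeholder, not something taken from this patch.

#include <linux/gpio.h>
#include <linux/module.h>

#define MY_NGPIO 8	/* hypothetical pin count */

static int my_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	return 0;	/* read the pin state from hardware: 0 or 1 */
}

static void my_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	/* drive the pin high or low in hardware */
}

static int my_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
{
	return 0;	/* configure the pin as an input */
}

static int my_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
{
	my_gpio_set(chip, offset, value);	/* set the level before switching */
	return 0;				/* configure the pin as an output */
}

static struct gpio_chip my_chip = {
	.label			= "my-gpio",
	.owner			= THIS_MODULE,
	.direction_input	= my_gpio_dir_in,
	.get			= my_gpio_get,
	.direction_output	= my_gpio_dir_out,
	.set			= my_gpio_set,
	.base			= -1,		/* let gpiolib assign GPIO numbers */
	.ngpio			= MY_NGPIO,
};

/* in the probe path, the return value of gpiochip_add(&my_chip) is checked */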
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c961fe415aef..39b393d38bb3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -81,6 +81,7 @@ config DRM_I830
81 81
82config DRM_I915 82config DRM_I915
83 tristate "i915 driver" 83 tristate "i915 driver"
84 depends on AGP_INTEL
84 select FB_CFB_FILLRECT 85 select FB_CFB_FILLRECT
85 select FB_CFB_COPYAREA 86 select FB_CFB_COPYAREA
86 select FB_CFB_IMAGEBLIT 87 select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4e89ab08b7b8..fe23f29f7cba 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
16drm-$(CONFIG_COMPAT) += drm_ioc32.o 16drm-$(CONFIG_COMPAT) += drm_ioc32.o
17 17
18obj-$(CONFIG_DRM) += drm.o 18obj-$(CONFIG_DRM) += drm.o
19obj-$(CONFIG_DRM_TTM) += ttm/
19obj-$(CONFIG_DRM_TDFX) += tdfx/ 20obj-$(CONFIG_DRM_TDFX) += tdfx/
20obj-$(CONFIG_DRM_R128) += r128/ 21obj-$(CONFIG_DRM_R128) += r128/
21obj-$(CONFIG_DRM_RADEON)+= radeon/ 22obj-$(CONFIG_DRM_RADEON)+= radeon/
@@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/
26obj-$(CONFIG_DRM_SIS) += sis/ 27obj-$(CONFIG_DRM_SIS) += sis/
27obj-$(CONFIG_DRM_SAVAGE)+= savage/ 28obj-$(CONFIG_DRM_SAVAGE)+= savage/
28obj-$(CONFIG_DRM_VIA) +=via/ 29obj-$(CONFIG_DRM_VIA) +=via/
29obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8fab7890a363..33be210d6723 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1461,7 +1461,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1461 goto out; 1461 goto out;
1462 } 1462 }
1463 1463
1464 if (crtc_req->count_connectors > 0 && !mode && !fb) { 1464 if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
1465 DRM_DEBUG("Count connectors is %d but no mode or fb set\n", 1465 DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
1466 crtc_req->count_connectors); 1466 crtc_req->count_connectors);
1467 ret = -EINVAL; 1467 ret = -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a6f73f1e99d9..6aaa2cb23365 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -706,8 +706,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
706 struct drm_encoder **save_encoders, *new_encoder; 706 struct drm_encoder **save_encoders, *new_encoder;
707 struct drm_framebuffer *old_fb = NULL; 707 struct drm_framebuffer *old_fb = NULL;
708 bool save_enabled; 708 bool save_enabled;
709 bool mode_changed = false; 709 bool mode_changed = false; /* if true do a full mode set */
710 bool fb_changed = false; 710 bool fb_changed = false; /* if true and !mode_changed just do a flip */
711 struct drm_connector *connector; 711 struct drm_connector *connector;
712 int count = 0, ro, fail = 0; 712 int count = 0, ro, fail = 0;
713 struct drm_crtc_helper_funcs *crtc_funcs; 713 struct drm_crtc_helper_funcs *crtc_funcs;
@@ -758,6 +758,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
758 if (set->crtc->fb == NULL) { 758 if (set->crtc->fb == NULL) {
759 DRM_DEBUG("crtc has no fb, full mode set\n"); 759 DRM_DEBUG("crtc has no fb, full mode set\n");
760 mode_changed = true; 760 mode_changed = true;
761 } else if (set->fb == NULL) {
762 mode_changed = true;
761 } else if ((set->fb->bits_per_pixel != 763 } else if ((set->fb->bits_per_pixel !=
762 set->crtc->fb->bits_per_pixel) || 764 set->crtc->fb->bits_per_pixel) ||
763 set->fb->depth != set->crtc->fb->depth) 765 set->fb->depth != set->crtc->fb->depth)
@@ -1090,6 +1092,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
1090 if (ret == false) 1092 if (ret == false)
1091 DRM_ERROR("failed to set mode on crtc %p\n", crtc); 1093 DRM_ERROR("failed to set mode on crtc %p\n", crtc);
1092 } 1094 }
1095 /* disable the unused connectors while restoring the modesetting */
1096 drm_helper_disable_unused_functions(dev);
1093 return 0; 1097 return 0;
1094} 1098}
1095EXPORT_SYMBOL(drm_helper_resume_force_mode); 1099EXPORT_SYMBOL(drm_helper_resume_force_mode);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 2960b6d73456..9903f270e440 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -101,6 +101,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
101 continue; 101 continue;
102 102
103 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 103 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
104 if (tmp == NULL) {
105 ret = -1;
106 goto fail;
107 }
104 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, 108 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
105 root, tmp, &drm_debugfs_fops); 109 root, tmp, &drm_debugfs_fops);
106 if (!ent) { 110 if (!ent) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7d0835226f6e..80cc6d06d61b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -294,10 +294,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
294 unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; 294 unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
295 unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; 295 unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
296 unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; 296 unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
297 unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo; 297 unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
298 unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo; 298 unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
299 unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf); 299 unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
300 unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; 300 unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
301 301
302 /* ignore tiny modes */ 302 /* ignore tiny modes */
303 if (hactive < 64 || vactive < 64) 303 if (hactive < 64 || vactive < 64)
@@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
347 mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? 347 mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
348 DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; 348 DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
349 349
350 mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; 350 mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
351 mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; 351 mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
352 352
353 if (quirks & EDID_QUIRK_DETAILED_IN_CM) { 353 if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
354 mode->width_mm *= 10; 354 mode->width_mm *= 10;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8104ecaea26f..ffe8f4394d50 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -134,26 +134,29 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
134 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 134 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
135 135
136 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 136 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
137 if (!obj)
138 goto free;
137 139
138 obj->dev = dev; 140 obj->dev = dev;
139 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); 141 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
140 if (IS_ERR(obj->filp)) { 142 if (IS_ERR(obj->filp))
141 kfree(obj); 143 goto free;
142 return NULL;
143 }
144 144
145 kref_init(&obj->refcount); 145 kref_init(&obj->refcount);
146 kref_init(&obj->handlecount); 146 kref_init(&obj->handlecount);
147 obj->size = size; 147 obj->size = size;
148 if (dev->driver->gem_init_object != NULL && 148 if (dev->driver->gem_init_object != NULL &&
149 dev->driver->gem_init_object(obj) != 0) { 149 dev->driver->gem_init_object(obj) != 0) {
150 fput(obj->filp); 150 goto fput;
151 kfree(obj);
152 return NULL;
153 } 151 }
154 atomic_inc(&dev->object_count); 152 atomic_inc(&dev->object_count);
155 atomic_add(obj->size, &dev->object_memory); 153 atomic_add(obj->size, &dev->object_memory);
156 return obj; 154 return obj;
155fput:
156 fput(obj->filp);
157free:
158 kfree(obj);
159 return NULL;
157} 160}
158EXPORT_SYMBOL(drm_gem_object_alloc); 161EXPORT_SYMBOL(drm_gem_object_alloc);
159 162
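Note on the drm_gem_object_alloc() change above: the separate failure paths are consolidated into the usual kernel goto-unwind style, where each step that can fail jumps to a label that releases only what was already set up, keeping the cleanup in one place. A hedged, minimal sketch of the idiom follows; struct thing and its fields are illustrative only.

#include <linux/slab.h>

struct thing {
	void *hdr;
	void *body;
};

static struct thing *thing_alloc(size_t hdr_len, size_t body_len)
{
	struct thing *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		goto fail;		/* nothing to undo yet */

	t->hdr = kmalloc(hdr_len, GFP_KERNEL);
	if (!t->hdr)
		goto free_thing;	/* undo the kzalloc() only */

	t->body = kmalloc(body_len, GFP_KERNEL);
	if (!t->body)
		goto free_hdr;		/* undo both earlier steps */

	return t;

free_hdr:
	kfree(t->hdr);
free_thing:
	kfree(t);
fail:
	return NULL;
}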
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 155a5bbce680..55bb8a82d612 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -489,7 +489,7 @@ int drm_put_minor(struct drm_minor **minor_p)
489 */ 489 */
490void drm_put_dev(struct drm_device *dev) 490void drm_put_dev(struct drm_device *dev)
491{ 491{
492 struct drm_driver *driver = dev->driver; 492 struct drm_driver *driver;
493 struct drm_map_list *r_list, *list_temp; 493 struct drm_map_list *r_list, *list_temp;
494 494
495 DRM_DEBUG("\n"); 495 DRM_DEBUG("\n");
@@ -498,6 +498,7 @@ void drm_put_dev(struct drm_device *dev)
498 DRM_ERROR("cleanup called no dev\n"); 498 DRM_ERROR("cleanup called no dev\n");
499 return; 499 return;
500 } 500 }
501 driver = dev->driver;
501 502
502 drm_vblank_cleanup(dev); 503 drm_vblank_cleanup(dev);
503 504
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 51c5a050aa73..30d6b99fb302 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
13 intel_crt.o \ 13 intel_crt.o \
14 intel_lvds.o \ 14 intel_lvds.o \
15 intel_bios.o \ 15 intel_bios.o \
16 intel_dp.o \
17 intel_dp_i2c.o \
16 intel_hdmi.o \ 18 intel_hdmi.o \
17 intel_sdvo.o \ 19 intel_sdvo.o \
18 intel_modes.o \ 20 intel_modes.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index e747ac42fe3a..288fc50627e2 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -37,7 +37,7 @@ struct intel_dvo_device {
37 /* GPIO register used for i2c bus to control this device */ 37 /* GPIO register used for i2c bus to control this device */
38 u32 gpio; 38 u32 gpio;
39 int slave_addr; 39 int slave_addr;
40 struct intel_i2c_chan *i2c_bus; 40 struct i2c_adapter *i2c_bus;
41 41
42 const struct intel_dvo_dev_ops *dev_ops; 42 const struct intel_dvo_dev_ops *dev_ops;
43 void *dev_priv; 43 void *dev_priv;
@@ -52,7 +52,7 @@ struct intel_dvo_dev_ops {
52 * Returns NULL if the device does not exist. 52 * Returns NULL if the device does not exist.
53 */ 53 */
54 bool (*init)(struct intel_dvo_device *dvo, 54 bool (*init)(struct intel_dvo_device *dvo,
55 struct intel_i2c_chan *i2cbus); 55 struct i2c_adapter *i2cbus);
56 56
57 /* 57 /*
58 * Called to allow the output a chance to create properties after the 58 * Called to allow the output a chance to create properties after the
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 03d4b4973b02..621815b531db 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
176 176
177static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) 177static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
178{ 178{
179 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 179 struct i2c_adapter *adapter = dvo->i2c_bus;
180 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
180 u8 out_buf[2]; 181 u8 out_buf[2];
181 u8 in_buf[2]; 182 u8 in_buf[2];
182 183
183 struct i2c_msg msgs[] = { 184 struct i2c_msg msgs[] = {
184 { 185 {
185 .addr = i2cbus->slave_addr, 186 .addr = dvo->slave_addr,
186 .flags = 0, 187 .flags = 0,
187 .len = 1, 188 .len = 1,
188 .buf = out_buf, 189 .buf = out_buf,
189 }, 190 },
190 { 191 {
191 .addr = i2cbus->slave_addr, 192 .addr = dvo->slave_addr,
192 .flags = I2C_M_RD, 193 .flags = I2C_M_RD,
193 .len = 1, 194 .len = 1,
194 .buf = in_buf, 195 .buf = in_buf,
@@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
208 209
209static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) 210static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
210{ 211{
211 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 212 struct i2c_adapter *adapter = dvo->i2c_bus;
213 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
212 uint8_t out_buf[2]; 214 uint8_t out_buf[2];
213 struct i2c_msg msg = { 215 struct i2c_msg msg = {
214 .addr = i2cbus->slave_addr, 216 .addr = dvo->slave_addr,
215 .flags = 0, 217 .flags = 0,
216 .len = 2, 218 .len = 2,
217 .buf = out_buf, 219 .buf = out_buf,
@@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
228 230
229/** Probes for a CH7017 on the given bus and slave address. */ 231/** Probes for a CH7017 on the given bus and slave address. */
230static bool ch7017_init(struct intel_dvo_device *dvo, 232static bool ch7017_init(struct intel_dvo_device *dvo,
231 struct intel_i2c_chan *i2cbus) 233 struct i2c_adapter *adapter)
232{ 234{
235 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
233 struct ch7017_priv *priv; 236 struct ch7017_priv *priv;
234 uint8_t val; 237 uint8_t val;
235 238
@@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
237 if (priv == NULL) 240 if (priv == NULL)
238 return false; 241 return false;
239 242
240 dvo->i2c_bus = i2cbus; 243 dvo->i2c_bus = adapter;
241 dvo->i2c_bus->slave_addr = dvo->slave_addr;
242 dvo->dev_priv = priv; 244 dvo->dev_priv = priv;
243 245
244 if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) 246 if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
@@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
248 val != CH7018_DEVICE_ID_VALUE && 250 val != CH7018_DEVICE_ID_VALUE &&
249 val != CH7019_DEVICE_ID_VALUE) { 251 val != CH7019_DEVICE_ID_VALUE) {
250 DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", 252 DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
251 val, i2cbus->adapter.name,i2cbus->slave_addr); 253 val, i2cbus->adapter.name,dvo->slave_addr);
252 goto fail; 254 goto fail;
253 } 255 }
254 256
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d2fd95dbd034..a9b896289680 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid)
123static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 123static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
124{ 124{
125 struct ch7xxx_priv *ch7xxx= dvo->dev_priv; 125 struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
126 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 126 struct i2c_adapter *adapter = dvo->i2c_bus;
127 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
127 u8 out_buf[2]; 128 u8 out_buf[2];
128 u8 in_buf[2]; 129 u8 in_buf[2];
129 130
130 struct i2c_msg msgs[] = { 131 struct i2c_msg msgs[] = {
131 { 132 {
132 .addr = i2cbus->slave_addr, 133 .addr = dvo->slave_addr,
133 .flags = 0, 134 .flags = 0,
134 .len = 1, 135 .len = 1,
135 .buf = out_buf, 136 .buf = out_buf,
136 }, 137 },
137 { 138 {
138 .addr = i2cbus->slave_addr, 139 .addr = dvo->slave_addr,
139 .flags = I2C_M_RD, 140 .flags = I2C_M_RD,
140 .len = 1, 141 .len = 1,
141 .buf = in_buf, 142 .buf = in_buf,
@@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
152 153
153 if (!ch7xxx->quiet) { 154 if (!ch7xxx->quiet) {
154 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 155 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
155 addr, i2cbus->adapter.name, i2cbus->slave_addr); 156 addr, i2cbus->adapter.name, dvo->slave_addr);
156 } 157 }
157 return false; 158 return false;
158} 159}
@@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
161static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) 162static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
162{ 163{
163 struct ch7xxx_priv *ch7xxx = dvo->dev_priv; 164 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
164 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 165 struct i2c_adapter *adapter = dvo->i2c_bus;
166 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
165 uint8_t out_buf[2]; 167 uint8_t out_buf[2];
166 struct i2c_msg msg = { 168 struct i2c_msg msg = {
167 .addr = i2cbus->slave_addr, 169 .addr = dvo->slave_addr,
168 .flags = 0, 170 .flags = 0,
169 .len = 2, 171 .len = 2,
170 .buf = out_buf, 172 .buf = out_buf,
@@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
178 180
179 if (!ch7xxx->quiet) { 181 if (!ch7xxx->quiet) {
180 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 182 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
181 addr, i2cbus->adapter.name, i2cbus->slave_addr); 183 addr, i2cbus->adapter.name, dvo->slave_addr);
182 } 184 }
183 185
184 return false; 186 return false;
185} 187}
186 188
187static bool ch7xxx_init(struct intel_dvo_device *dvo, 189static bool ch7xxx_init(struct intel_dvo_device *dvo,
188 struct intel_i2c_chan *i2cbus) 190 struct i2c_adapter *adapter)
189{ 191{
190 /* this will detect the CH7xxx chip on the specified i2c bus */ 192 /* this will detect the CH7xxx chip on the specified i2c bus */
191 struct ch7xxx_priv *ch7xxx; 193 struct ch7xxx_priv *ch7xxx;
@@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
196 if (ch7xxx == NULL) 198 if (ch7xxx == NULL)
197 return false; 199 return false;
198 200
199 dvo->i2c_bus = i2cbus; 201 dvo->i2c_bus = adapter;
200 dvo->i2c_bus->slave_addr = dvo->slave_addr;
201 dvo->dev_priv = ch7xxx; 202 dvo->dev_priv = ch7xxx;
202 ch7xxx->quiet = true; 203 ch7xxx->quiet = true;
203 204
@@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
207 name = ch7xxx_get_id(vendor); 208 name = ch7xxx_get_id(vendor);
208 if (!name) { 209 if (!name) {
209 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", 210 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
210 vendor, i2cbus->adapter.name, i2cbus->slave_addr); 211 vendor, adapter->name, dvo->slave_addr);
211 goto out; 212 goto out;
212 } 213 }
213 214
@@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
217 218
218 if (device != CH7xxx_DID) { 219 if (device != CH7xxx_DID) {
219 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", 220 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
220 vendor, i2cbus->adapter.name, i2cbus->slave_addr); 221 vendor, adapter->name, dvo->slave_addr);
221 goto out; 222 goto out;
222 } 223 }
223 224
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0c8d375e8e37..aa176f9921fe 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
169static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) 169static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
170{ 170{
171 struct ivch_priv *priv = dvo->dev_priv; 171 struct ivch_priv *priv = dvo->dev_priv;
172 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 172 struct i2c_adapter *adapter = dvo->i2c_bus;
173 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
173 u8 out_buf[1]; 174 u8 out_buf[1];
174 u8 in_buf[2]; 175 u8 in_buf[2];
175 176
176 struct i2c_msg msgs[] = { 177 struct i2c_msg msgs[] = {
177 { 178 {
178 .addr = i2cbus->slave_addr, 179 .addr = dvo->slave_addr,
179 .flags = I2C_M_RD, 180 .flags = I2C_M_RD,
180 .len = 0, 181 .len = 0,
181 }, 182 },
@@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
186 .buf = out_buf, 187 .buf = out_buf,
187 }, 188 },
188 { 189 {
189 .addr = i2cbus->slave_addr, 190 .addr = dvo->slave_addr,
190 .flags = I2C_M_RD | I2C_M_NOSTART, 191 .flags = I2C_M_RD | I2C_M_NOSTART,
191 .len = 2, 192 .len = 2,
192 .buf = in_buf, 193 .buf = in_buf,
@@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
202 203
203 if (!priv->quiet) { 204 if (!priv->quiet) {
204 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 205 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
205 addr, i2cbus->adapter.name, i2cbus->slave_addr); 206 addr, i2cbus->adapter.name, dvo->slave_addr);
206 } 207 }
207 return false; 208 return false;
208} 209}
@@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
211static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) 212static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
212{ 213{
213 struct ivch_priv *priv = dvo->dev_priv; 214 struct ivch_priv *priv = dvo->dev_priv;
214 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 215 struct i2c_adapter *adapter = dvo->i2c_bus;
216 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
215 u8 out_buf[3]; 217 u8 out_buf[3];
216 struct i2c_msg msg = { 218 struct i2c_msg msg = {
217 .addr = i2cbus->slave_addr, 219 .addr = dvo->slave_addr,
218 .flags = 0, 220 .flags = 0,
219 .len = 3, 221 .len = 3,
220 .buf = out_buf, 222 .buf = out_buf,
@@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
229 231
230 if (!priv->quiet) { 232 if (!priv->quiet) {
231 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 233 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
232 addr, i2cbus->adapter.name, i2cbus->slave_addr); 234 addr, i2cbus->adapter.name, dvo->slave_addr);
233 } 235 }
234 236
235 return false; 237 return false;
@@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
237 239
238/** Probes the given bus and slave address for an ivch */ 240/** Probes the given bus and slave address for an ivch */
239static bool ivch_init(struct intel_dvo_device *dvo, 241static bool ivch_init(struct intel_dvo_device *dvo,
240 struct intel_i2c_chan *i2cbus) 242 struct i2c_adapter *adapter)
241{ 243{
242 struct ivch_priv *priv; 244 struct ivch_priv *priv;
243 uint16_t temp; 245 uint16_t temp;
@@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
246 if (priv == NULL) 248 if (priv == NULL)
247 return false; 249 return false;
248 250
249 dvo->i2c_bus = i2cbus; 251 dvo->i2c_bus = adapter;
250 dvo->i2c_bus->slave_addr = dvo->slave_addr;
251 dvo->dev_priv = priv; 252 dvo->dev_priv = priv;
252 priv->quiet = true; 253 priv->quiet = true;
253 254
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 033a4bb070b2..e1c1f7341e5c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -76,19 +76,20 @@ struct sil164_priv {
76static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 76static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
77{ 77{
78 struct sil164_priv *sil = dvo->dev_priv; 78 struct sil164_priv *sil = dvo->dev_priv;
79 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 79 struct i2c_adapter *adapter = dvo->i2c_bus;
80 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
80 u8 out_buf[2]; 81 u8 out_buf[2];
81 u8 in_buf[2]; 82 u8 in_buf[2];
82 83
83 struct i2c_msg msgs[] = { 84 struct i2c_msg msgs[] = {
84 { 85 {
85 .addr = i2cbus->slave_addr, 86 .addr = dvo->slave_addr,
86 .flags = 0, 87 .flags = 0,
87 .len = 1, 88 .len = 1,
88 .buf = out_buf, 89 .buf = out_buf,
89 }, 90 },
90 { 91 {
91 .addr = i2cbus->slave_addr, 92 .addr = dvo->slave_addr,
92 .flags = I2C_M_RD, 93 .flags = I2C_M_RD,
93 .len = 1, 94 .len = 1,
94 .buf = in_buf, 95 .buf = in_buf,
@@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
105 106
106 if (!sil->quiet) { 107 if (!sil->quiet) {
107 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 108 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
108 addr, i2cbus->adapter.name, i2cbus->slave_addr); 109 addr, i2cbus->adapter.name, dvo->slave_addr);
109 } 110 }
110 return false; 111 return false;
111} 112}
@@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
113static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) 114static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
114{ 115{
115 struct sil164_priv *sil= dvo->dev_priv; 116 struct sil164_priv *sil= dvo->dev_priv;
116 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 117 struct i2c_adapter *adapter = dvo->i2c_bus;
118 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
117 uint8_t out_buf[2]; 119 uint8_t out_buf[2];
118 struct i2c_msg msg = { 120 struct i2c_msg msg = {
119 .addr = i2cbus->slave_addr, 121 .addr = dvo->slave_addr,
120 .flags = 0, 122 .flags = 0,
121 .len = 2, 123 .len = 2,
122 .buf = out_buf, 124 .buf = out_buf,
@@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
130 132
131 if (!sil->quiet) { 133 if (!sil->quiet) {
132 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 134 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
133 addr, i2cbus->adapter.name, i2cbus->slave_addr); 135 addr, i2cbus->adapter.name, dvo->slave_addr);
134 } 136 }
135 137
136 return false; 138 return false;
@@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
138 140
139/* Silicon Image 164 driver for chip on i2c bus */ 141/* Silicon Image 164 driver for chip on i2c bus */
140static bool sil164_init(struct intel_dvo_device *dvo, 142static bool sil164_init(struct intel_dvo_device *dvo,
141 struct intel_i2c_chan *i2cbus) 143 struct i2c_adapter *adapter)
142{ 144{
143 /* this will detect the SIL164 chip on the specified i2c bus */ 145 /* this will detect the SIL164 chip on the specified i2c bus */
144 struct sil164_priv *sil; 146 struct sil164_priv *sil;
@@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
148 if (sil == NULL) 150 if (sil == NULL)
149 return false; 151 return false;
150 152
151 dvo->i2c_bus = i2cbus; 153 dvo->i2c_bus = adapter;
152 dvo->i2c_bus->slave_addr = dvo->slave_addr;
153 dvo->dev_priv = sil; 154 dvo->dev_priv = sil;
154 sil->quiet = true; 155 sil->quiet = true;
155 156
@@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
158 159
159 if (ch != (SIL164_VID & 0xff)) { 160 if (ch != (SIL164_VID & 0xff)) {
160 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", 161 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
161 ch, i2cbus->adapter.name, i2cbus->slave_addr); 162 ch, adapter->name, dvo->slave_addr);
162 goto out; 163 goto out;
163 } 164 }
164 165
@@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
167 168
168 if (ch != (SIL164_DID & 0xff)) { 169 if (ch != (SIL164_DID & 0xff)) {
169 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", 170 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
170 ch, i2cbus->adapter.name, i2cbus->slave_addr); 171 ch, adapter->name, dvo->slave_addr);
171 goto out; 172 goto out;
172 } 173 }
173 sil->quiet = false; 174 sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 207fda806ebf..9ecc907384ec 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -101,19 +101,20 @@ struct tfp410_priv {
101static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 101static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
102{ 102{
103 struct tfp410_priv *tfp = dvo->dev_priv; 103 struct tfp410_priv *tfp = dvo->dev_priv;
104 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 104 struct i2c_adapter *adapter = dvo->i2c_bus;
105 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
105 u8 out_buf[2]; 106 u8 out_buf[2];
106 u8 in_buf[2]; 107 u8 in_buf[2];
107 108
108 struct i2c_msg msgs[] = { 109 struct i2c_msg msgs[] = {
109 { 110 {
110 .addr = i2cbus->slave_addr, 111 .addr = dvo->slave_addr,
111 .flags = 0, 112 .flags = 0,
112 .len = 1, 113 .len = 1,
113 .buf = out_buf, 114 .buf = out_buf,
114 }, 115 },
115 { 116 {
116 .addr = i2cbus->slave_addr, 117 .addr = dvo->slave_addr,
117 .flags = I2C_M_RD, 118 .flags = I2C_M_RD,
118 .len = 1, 119 .len = 1,
119 .buf = in_buf, 120 .buf = in_buf,
@@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
130 131
131 if (!tfp->quiet) { 132 if (!tfp->quiet) {
132 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 133 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
133 addr, i2cbus->adapter.name, i2cbus->slave_addr); 134 addr, i2cbus->adapter.name, dvo->slave_addr);
134 } 135 }
135 return false; 136 return false;
136} 137}
@@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
138static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) 139static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
139{ 140{
140 struct tfp410_priv *tfp = dvo->dev_priv; 141 struct tfp410_priv *tfp = dvo->dev_priv;
141 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 142 struct i2c_adapter *adapter = dvo->i2c_bus;
143 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
142 uint8_t out_buf[2]; 144 uint8_t out_buf[2];
143 struct i2c_msg msg = { 145 struct i2c_msg msg = {
144 .addr = i2cbus->slave_addr, 146 .addr = dvo->slave_addr,
145 .flags = 0, 147 .flags = 0,
146 .len = 2, 148 .len = 2,
147 .buf = out_buf, 149 .buf = out_buf,
@@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
155 157
156 if (!tfp->quiet) { 158 if (!tfp->quiet) {
157 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 159 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
158 addr, i2cbus->adapter.name, i2cbus->slave_addr); 160 addr, i2cbus->adapter.name, dvo->slave_addr);
159 } 161 }
160 162
161 return false; 163 return false;
@@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
174 176
175/* Ti TFP410 driver for chip on i2c bus */ 177/* Ti TFP410 driver for chip on i2c bus */
176static bool tfp410_init(struct intel_dvo_device *dvo, 178static bool tfp410_init(struct intel_dvo_device *dvo,
177 struct intel_i2c_chan *i2cbus) 179 struct i2c_adapter *adapter)
178{ 180{
179 /* this will detect the tfp410 chip on the specified i2c bus */ 181 /* this will detect the tfp410 chip on the specified i2c bus */
180 struct tfp410_priv *tfp; 182 struct tfp410_priv *tfp;
@@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
184 if (tfp == NULL) 186 if (tfp == NULL)
185 return false; 187 return false;
186 188
187 dvo->i2c_bus = i2cbus; 189 dvo->i2c_bus = adapter;
188 dvo->i2c_bus->slave_addr = dvo->slave_addr;
189 dvo->dev_priv = tfp; 190 dvo->dev_priv = tfp;
190 tfp->quiet = true; 191 tfp->quiet = true;
191 192
192 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { 193 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
193 DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", 194 DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
194 id, i2cbus->adapter.name, i2cbus->slave_addr); 195 id, adapter->name, dvo->slave_addr);
195 goto out; 196 goto out;
196 } 197 }
197 198
198 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { 199 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
199 DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", 200 DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
200 id, i2cbus->adapter.name, i2cbus->slave_addr); 201 id, adapter->name, dvo->slave_addr);
201 goto out; 202 goto out;
202 } 203 }
203 tfp->quiet = false; 204 tfp->quiet = false;
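Note on the dvo_*.c hunks above: each init/read/write helper now takes a plain struct i2c_adapter *, and where driver-private state is still needed it recovers the wrapping structure with container_of(). A small hedged sketch of that idiom; struct my_chan and its field names are illustrative, not from this patch.

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* driver-private wrapper that embeds the generic adapter */
struct my_chan {
	u32 gpio_reg;
	struct i2c_adapter adapter;
};

static void use_adapter(struct i2c_adapter *adapter)
{
	/*
	 * container_of() steps back from the embedded member to the
	 * enclosing structure, so callers can pass the generic type
	 * while the owner still reaches its private fields.
	 */
	struct my_chan *chan = container_of(adapter, struct my_chan, adapter);

	pr_info("%s: private register 0x%x\n", adapter->name, chan->gpio_reg);
}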
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f112c769d533..50d1f782768c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -846,7 +846,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
846 return 0; 846 return 0;
847 } 847 }
848 848
849 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); 849 DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
850 850
851 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); 851 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
852 852
@@ -885,8 +885,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
885 * some RAM for the framebuffer at early boot. This code figures out 885 * some RAM for the framebuffer at early boot. This code figures out
886 * how much was set aside so we can use it for our own purposes. 886 * how much was set aside so we can use it for our own purposes.
887 */ 887 */
888static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, 888static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
889 unsigned long *preallocated_size) 889 uint32_t *preallocated_size)
890{ 890{
891 struct pci_dev *bridge_dev; 891 struct pci_dev *bridge_dev;
892 u16 tmp = 0; 892 u16 tmp = 0;
@@ -984,10 +984,11 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
984 return 0; 984 return 0;
985} 985}
986 986
987static int i915_load_modeset_init(struct drm_device *dev) 987static int i915_load_modeset_init(struct drm_device *dev,
988 unsigned long prealloc_size,
989 unsigned long agp_size)
988{ 990{
989 struct drm_i915_private *dev_priv = dev->dev_private; 991 struct drm_i915_private *dev_priv = dev->dev_private;
990 unsigned long agp_size, prealloc_size;
991 int fb_bar = IS_I9XX(dev) ? 2 : 0; 992 int fb_bar = IS_I9XX(dev) ? 2 : 0;
992 int ret = 0; 993 int ret = 0;
993 994
@@ -1002,10 +1003,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1002 if (IS_I965G(dev) || IS_G33(dev)) 1003 if (IS_I965G(dev) || IS_G33(dev))
1003 dev_priv->cursor_needs_physical = false; 1004 dev_priv->cursor_needs_physical = false;
1004 1005
1005 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
1006 if (ret)
1007 goto out;
1008
1009 /* Basic memrange allocator for stolen space (aka vram) */ 1006 /* Basic memrange allocator for stolen space (aka vram) */
1010 drm_mm_init(&dev_priv->vram, 0, prealloc_size); 1007 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1011 1008
@@ -1082,6 +1079,44 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1082 master->driver_priv = NULL; 1079 master->driver_priv = NULL;
1083} 1080}
1084 1081
1082static void i915_get_mem_freq(struct drm_device *dev)
1083{
1084 drm_i915_private_t *dev_priv = dev->dev_private;
1085 u32 tmp;
1086
1087 if (!IS_IGD(dev))
1088 return;
1089
1090 tmp = I915_READ(CLKCFG);
1091
1092 switch (tmp & CLKCFG_FSB_MASK) {
1093 case CLKCFG_FSB_533:
1094 dev_priv->fsb_freq = 533; /* 133*4 */
1095 break;
1096 case CLKCFG_FSB_800:
1097 dev_priv->fsb_freq = 800; /* 200*4 */
1098 break;
1099 case CLKCFG_FSB_667:
1100 dev_priv->fsb_freq = 667; /* 167*4 */
1101 break;
1102 case CLKCFG_FSB_400:
1103 dev_priv->fsb_freq = 400; /* 100*4 */
1104 break;
1105 }
1106
1107 switch (tmp & CLKCFG_MEM_MASK) {
1108 case CLKCFG_MEM_533:
1109 dev_priv->mem_freq = 533;
1110 break;
1111 case CLKCFG_MEM_667:
1112 dev_priv->mem_freq = 667;
1113 break;
1114 case CLKCFG_MEM_800:
1115 dev_priv->mem_freq = 800;
1116 break;
1117 }
1118}
1119
1085/** 1120/**
1086 * i915_driver_load - setup chip and create an initial config 1121 * i915_driver_load - setup chip and create an initial config
1087 * @dev: DRM device 1122 * @dev: DRM device
@@ -1098,6 +1133,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1098 struct drm_i915_private *dev_priv = dev->dev_private; 1133 struct drm_i915_private *dev_priv = dev->dev_private;
1099 resource_size_t base, size; 1134 resource_size_t base, size;
1100 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; 1135 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
1136 uint32_t agp_size, prealloc_size;
1101 1137
1102 /* i915 has 4 more counters */ 1138 /* i915 has 4 more counters */
1103 dev->counters += 4; 1139 dev->counters += 4;
@@ -1146,9 +1182,29 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1146 "performance may suffer.\n"); 1182 "performance may suffer.\n");
1147 } 1183 }
1148 1184
1185 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
1186 if (ret)
1187 goto out_iomapfree;
1188
1189 dev_priv->wq = create_workqueue("i915");
1190 if (dev_priv->wq == NULL) {
1191 DRM_ERROR("Failed to create our workqueue.\n");
1192 ret = -ENOMEM;
1193 goto out_iomapfree;
1194 }
1195
1149 /* enable GEM by default */ 1196 /* enable GEM by default */
1150 dev_priv->has_gem = 1; 1197 dev_priv->has_gem = 1;
1151 1198
1199 if (prealloc_size > agp_size * 3 / 4) {
1200 DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
1201 "memory stolen.\n",
1202 prealloc_size / 1024, agp_size / 1024);
1203 DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
1204 "updating the BIOS to fix).\n");
1205 dev_priv->has_gem = 0;
1206 }
1207
1152 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1208 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1153 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1209 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1154 if (IS_G4X(dev) || IS_IGDNG(dev)) { 1210 if (IS_G4X(dev) || IS_IGDNG(dev)) {
@@ -1162,9 +1218,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1162 if (!I915_NEED_GFX_HWS(dev)) { 1218 if (!I915_NEED_GFX_HWS(dev)) {
1163 ret = i915_init_phys_hws(dev); 1219 ret = i915_init_phys_hws(dev);
1164 if (ret != 0) 1220 if (ret != 0)
1165 goto out_iomapfree; 1221 goto out_workqueue_free;
1166 } 1222 }
1167 1223
1224 i915_get_mem_freq(dev);
1225
1168 /* On the 945G/GM, the chipset reports the MSI capability on the 1226 /* On the 945G/GM, the chipset reports the MSI capability on the
1169 * integrated graphics even though the support isn't actually there 1227 * integrated graphics even though the support isn't actually there
1170 * according to the published specs. It doesn't appear to function 1228 * according to the published specs. It doesn't appear to function
@@ -1180,6 +1238,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1180 pci_enable_msi(dev->pdev); 1238 pci_enable_msi(dev->pdev);
1181 1239
1182 spin_lock_init(&dev_priv->user_irq_lock); 1240 spin_lock_init(&dev_priv->user_irq_lock);
1241 spin_lock_init(&dev_priv->error_lock);
1183 dev_priv->user_irq_refcount = 0; 1242 dev_priv->user_irq_refcount = 0;
1184 1243
1185 ret = drm_vblank_init(dev, I915_NUM_PIPE); 1244 ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1190,10 +1249,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1190 } 1249 }
1191 1250
1192 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1251 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1193 ret = i915_load_modeset_init(dev); 1252 ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
1194 if (ret < 0) { 1253 if (ret < 0) {
1195 DRM_ERROR("failed to init modeset\n"); 1254 DRM_ERROR("failed to init modeset\n");
1196 goto out_rmmap; 1255 goto out_workqueue_free;
1197 } 1256 }
1198 } 1257 }
1199 1258
@@ -1204,6 +1263,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1204 1263
1205 return 0; 1264 return 0;
1206 1265
1266out_workqueue_free:
1267 destroy_workqueue(dev_priv->wq);
1207out_iomapfree: 1268out_iomapfree:
1208 io_mapping_free(dev_priv->mm.gtt_mapping); 1269 io_mapping_free(dev_priv->mm.gtt_mapping);
1209out_rmmap: 1270out_rmmap:
@@ -1217,6 +1278,8 @@ int i915_driver_unload(struct drm_device *dev)
1217{ 1278{
1218 struct drm_i915_private *dev_priv = dev->dev_private; 1279 struct drm_i915_private *dev_priv = dev->dev_private;
1219 1280
1281 destroy_workqueue(dev_priv->wq);
1282
1220 io_mapping_free(dev_priv->mm.gtt_mapping); 1283 io_mapping_free(dev_priv->mm.gtt_mapping);
1221 if (dev_priv->mm.gtt_mtrr >= 0) { 1284 if (dev_priv->mm.gtt_mtrr >= 0) {
1222 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, 1285 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
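Note on the i915_driver_load()/i915_driver_unload() hunks above: the driver gains a private workqueue (create_workqueue() paired with destroy_workqueue()), and the i915_gem.c hunks further down queue the retire work on it instead of the shared kernel queue. A hedged, minimal sketch of that pattern; the my_* names are illustrative.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_priv {
	struct workqueue_struct *wq;
	struct delayed_work retire_work;
};

static void my_retire(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, retire_work.work);

	/* do the deferred cleanup here; requeue on priv->wq if more remains */
}

static int my_load(struct my_priv *priv)
{
	priv->wq = create_workqueue("my-driver");
	if (!priv->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&priv->retire_work, my_retire);

	/* instead of schedule_delayed_work(): run on the private queue */
	queue_delayed_work(priv->wq, &priv->retire_work, HZ);
	return 0;
}

static void my_unload(struct my_priv *priv)
{
	cancel_delayed_work_sync(&priv->retire_work);
	destroy_workqueue(priv->wq);	/* flushes anything still queued */
}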
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98560e1e899a..fc4b68aa2d05 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,6 +35,7 @@
35 35
36#include "drm_pciids.h" 36#include "drm_pciids.h"
37#include <linux/console.h> 37#include <linux/console.h>
38#include "drm_crtc_helper.h"
38 39
39static unsigned int i915_modeset = -1; 40static unsigned int i915_modeset = -1;
40module_param_named(modeset, i915_modeset, int, 0400); 41module_param_named(modeset, i915_modeset, int, 0400);
@@ -57,8 +58,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
57 struct drm_i915_private *dev_priv = dev->dev_private; 58 struct drm_i915_private *dev_priv = dev->dev_private;
58 59
59 if (!dev || !dev_priv) { 60 if (!dev || !dev_priv) {
60 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); 61 DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
61 printk(KERN_ERR "DRM not initialized, aborting suspend.\n"); 62 DRM_ERROR("DRM not initialized, aborting suspend.\n");
62 return -ENODEV; 63 return -ENODEV;
63 } 64 }
64 65
@@ -67,8 +68,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
67 68
68 pci_save_state(dev->pdev); 69 pci_save_state(dev->pdev);
69 70
70 i915_save_state(dev);
71
72 /* If KMS is active, we do the leavevt stuff here */ 71 /* If KMS is active, we do the leavevt stuff here */
73 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 72 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
74 if (i915_gem_idle(dev)) 73 if (i915_gem_idle(dev))
@@ -77,6 +76,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
77 drm_irq_uninstall(dev); 76 drm_irq_uninstall(dev);
78 } 77 }
79 78
79 i915_save_state(dev);
80
80 intel_opregion_free(dev, 1); 81 intel_opregion_free(dev, 1);
81 82
82 if (state.event == PM_EVENT_SUSPEND) { 83 if (state.event == PM_EVENT_SUSPEND) {
@@ -115,6 +116,10 @@ static int i915_resume(struct drm_device *dev)
115 116
116 drm_irq_install(dev); 117 drm_irq_install(dev);
117 } 118 }
119 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
120 /* Resume the modeset for every activated CRTC */
121 drm_helper_resume_force_mode(dev);
122 }
118 123
119 return ret; 124 return ret;
120} 125}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a84f04e8439..7537f57d8a87 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -133,6 +133,22 @@ struct sdvo_device_mapping {
133 u8 initialized; 133 u8 initialized;
134}; 134};
135 135
136struct drm_i915_error_state {
137 u32 eir;
138 u32 pgtbl_er;
139 u32 pipeastat;
140 u32 pipebstat;
141 u32 ipeir;
142 u32 ipehr;
143 u32 instdone;
144 u32 acthd;
145 u32 instpm;
146 u32 instps;
147 u32 instdone1;
148 u32 seqno;
149 struct timeval time;
150};
151
136typedef struct drm_i915_private { 152typedef struct drm_i915_private {
137 struct drm_device *dev; 153 struct drm_device *dev;
138 154
@@ -203,12 +219,20 @@ typedef struct drm_i915_private {
203 unsigned int lvds_vbt:1; 219 unsigned int lvds_vbt:1;
204 unsigned int int_crt_support:1; 220 unsigned int int_crt_support:1;
205 unsigned int lvds_use_ssc:1; 221 unsigned int lvds_use_ssc:1;
222 unsigned int edp_support:1;
206 int lvds_ssc_freq; 223 int lvds_ssc_freq;
207 224
208 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 225 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
209 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 226 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
210 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 227 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
211 228
229 unsigned int fsb_freq, mem_freq;
230
231 spinlock_t error_lock;
232 struct drm_i915_error_state *first_error;
233 struct work_struct error_work;
234 struct workqueue_struct *wq;
235
212 /* Register state */ 236 /* Register state */
213 u8 saveLBB; 237 u8 saveLBB;
214 u32 saveDSPACNTR; 238 u32 saveDSPACNTR;
@@ -306,6 +330,17 @@ typedef struct drm_i915_private {
306 u32 saveCURBPOS; 330 u32 saveCURBPOS;
307 u32 saveCURBBASE; 331 u32 saveCURBBASE;
308 u32 saveCURSIZE; 332 u32 saveCURSIZE;
333 u32 saveDP_B;
334 u32 saveDP_C;
335 u32 saveDP_D;
336 u32 savePIPEA_GMCH_DATA_M;
337 u32 savePIPEB_GMCH_DATA_M;
338 u32 savePIPEA_GMCH_DATA_N;
339 u32 savePIPEB_GMCH_DATA_N;
340 u32 savePIPEA_DP_LINK_M;
341 u32 savePIPEB_DP_LINK_M;
342 u32 savePIPEA_DP_LINK_N;
343 u32 savePIPEB_DP_LINK_N;
309 344
310 struct { 345 struct {
311 struct drm_mm gtt_space; 346 struct drm_mm gtt_space;
@@ -457,9 +492,6 @@ struct drm_i915_gem_object {
457 */ 492 */
458 int fence_reg; 493 int fence_reg;
459 494
460 /** Boolean whether this object has a valid gtt offset. */
461 int gtt_bound;
462
463 /** How many users have pinned this object in GTT space */ 495 /** How many users have pinned this object in GTT space */
464 int pin_count; 496 int pin_count;
465 497
@@ -644,6 +676,7 @@ void i915_gem_free_object(struct drm_gem_object *obj);
644int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 676int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
645void i915_gem_object_unpin(struct drm_gem_object *obj); 677void i915_gem_object_unpin(struct drm_gem_object *obj);
646int i915_gem_object_unbind(struct drm_gem_object *obj); 678int i915_gem_object_unbind(struct drm_gem_object *obj);
679void i915_gem_release_mmap(struct drm_gem_object *obj);
647void i915_gem_lastclose(struct drm_device *dev); 680void i915_gem_lastclose(struct drm_device *dev);
648uint32_t i915_get_gem_seqno(struct drm_device *dev); 681uint32_t i915_get_gem_seqno(struct drm_device *dev);
649int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 682int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
@@ -857,7 +890,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
857#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ 890#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
858 IS_I915GM(dev))) 891 IS_I915GM(dev)))
859#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 892#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
893#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
894#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
860#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) 895#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
896/* dsparb controlled by hw only */
897#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
861 898
862#define PRIMARY_RINGBUFFER_SIZE (128*1024) 899#define PRIMARY_RINGBUFFER_SIZE (128*1024)
863 900
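Alongside the eDP/DP feature bits, the header now carries a driver-private workqueue (wq) and the error-state fields used later in this series. The queue's creation and teardown happen in the driver load/unload paths, which are outside the hunks shown here, so the following is only a minimal sketch of that lifecycle under that assumption:

    /* Sketch only: give the driver its own single-threaded queue so retire,
     * hotplug and error work no longer ride on the shared kernel workqueue. */
    dev_priv->wq = create_singlethread_workqueue("i915");
    if (dev_priv->wq == NULL)
            return -ENOMEM;

    /* producers then target the private queue, e.g. */
    queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);

    /* unload tears it down again */
    destroy_workqueue(dev_priv->wq);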
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd2b8bdffe3f..140bee142fc2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1006,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1006 1006
1007 mutex_lock(&dev->struct_mutex); 1007 mutex_lock(&dev->struct_mutex);
1008#if WATCH_BUF 1008#if WATCH_BUF
1009 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 1009 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1010 obj, obj->size, read_domains, write_domain); 1010 obj, obj->size, read_domains, write_domain);
1011#endif 1011#endif
1012 if (read_domains & I915_GEM_DOMAIN_GTT) { 1012 if (read_domains & I915_GEM_DOMAIN_GTT) {
@@ -1050,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1050 } 1050 }
1051 1051
1052#if WATCH_BUF 1052#if WATCH_BUF
1053 DRM_INFO("%s: sw_finish %d (%p %d)\n", 1053 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1054 __func__, args->handle, obj, obj->size); 1054 __func__, args->handle, obj, obj->size);
1055#endif 1055#endif
1056 obj_priv = obj->driver_private; 1056 obj_priv = obj->driver_private;
@@ -1252,6 +1252,31 @@ out_free_list:
1252 return ret; 1252 return ret;
1253} 1253}
1254 1254
1255/**
1256 * i915_gem_release_mmap - remove physical page mappings
1257 * @obj: obj in question
1258 *
1259 * Preserve the reservation of the mmapping with the DRM core code, but
1260 * relinquish ownership of the pages back to the system.
1261 *
1262 * It is vital that we remove the page mapping if we have mapped a tiled
1263 * object through the GTT and then lose the fence register due to
1264 * resource pressure. Similarly, if the object has been moved out of the
1265 * aperture, then pages mapped into userspace must be revoked. Removing the
1266 * mapping will then trigger a page fault on the next user access, allowing
1267 * fixup by i915_gem_fault().
1268 */
1269void
1270i915_gem_release_mmap(struct drm_gem_object *obj)
1271{
1272 struct drm_device *dev = obj->dev;
1273 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1274
1275 if (dev->dev_mapping)
1276 unmap_mapping_range(dev->dev_mapping,
1277 obj_priv->mmap_offset, obj->size, 1);
1278}
1279
1255static void 1280static void
1256i915_gem_free_mmap_offset(struct drm_gem_object *obj) 1281i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1257{ 1282{
@@ -1545,7 +1570,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1545 } 1570 }
1546 1571
1547 if (was_empty && !dev_priv->mm.suspended) 1572 if (was_empty && !dev_priv->mm.suspended)
1548 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 1573 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1549 return seqno; 1574 return seqno;
1550} 1575}
1551 1576
@@ -1694,7 +1719,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
1694 i915_gem_retire_requests(dev); 1719 i915_gem_retire_requests(dev);
1695 if (!dev_priv->mm.suspended && 1720 if (!dev_priv->mm.suspended &&
1696 !list_empty(&dev_priv->mm.request_list)) 1721 !list_empty(&dev_priv->mm.request_list))
1697 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 1722 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1698 mutex_unlock(&dev->struct_mutex); 1723 mutex_unlock(&dev->struct_mutex);
1699} 1724}
1700 1725
@@ -1861,7 +1886,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1861{ 1886{
1862 struct drm_device *dev = obj->dev; 1887 struct drm_device *dev = obj->dev;
1863 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1888 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1864 loff_t offset;
1865 int ret = 0; 1889 int ret = 0;
1866 1890
1867#if WATCH_BUF 1891#if WATCH_BUF
@@ -1898,9 +1922,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1898 BUG_ON(obj_priv->active); 1922 BUG_ON(obj_priv->active);
1899 1923
1900 /* blow away mappings if mapped through GTT */ 1924 /* blow away mappings if mapped through GTT */
1901 offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT; 1925 i915_gem_release_mmap(obj);
1902 if (dev->dev_mapping)
1903 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
1904 1926
1905 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 1927 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1906 i915_gem_clear_fence_reg(obj); 1928 i915_gem_clear_fence_reg(obj);
@@ -2222,7 +2244,6 @@ try_again:
2222 /* None available, try to steal one or wait for a user to finish */ 2244 /* None available, try to steal one or wait for a user to finish */
2223 if (i == dev_priv->num_fence_regs) { 2245 if (i == dev_priv->num_fence_regs) {
2224 uint32_t seqno = dev_priv->mm.next_gem_seqno; 2246 uint32_t seqno = dev_priv->mm.next_gem_seqno;
2225 loff_t offset;
2226 2247
2227 if (avail == 0) 2248 if (avail == 0)
2228 return -ENOSPC; 2249 return -ENOSPC;
@@ -2274,10 +2295,7 @@ try_again:
2274 * Zap this virtual mapping so we can set up a fence again 2295 * Zap this virtual mapping so we can set up a fence again
2275 * for this object next time we need it. 2296 * for this object next time we need it.
2276 */ 2297 */
2277 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT; 2298 i915_gem_release_mmap(reg->obj);
2278 if (dev->dev_mapping)
2279 unmap_mapping_range(dev->dev_mapping, offset,
2280 reg->obj->size, 1);
2281 old_obj_priv->fence_reg = I915_FENCE_REG_NONE; 2299 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2282 } 2300 }
2283 2301
@@ -2423,7 +2441,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2423 } 2441 }
2424 2442
2425#if WATCH_BUF 2443#if WATCH_BUF
2426 DRM_INFO("Binding object of size %d at 0x%08x\n", 2444 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2427 obj->size, obj_priv->gtt_offset); 2445 obj->size, obj_priv->gtt_offset);
2428#endif 2446#endif
2429 ret = i915_gem_object_get_pages(obj); 2447 ret = i915_gem_object_get_pages(obj);
@@ -4227,6 +4245,7 @@ i915_gem_lastclose(struct drm_device *dev)
4227void 4245void
4228i915_gem_load(struct drm_device *dev) 4246i915_gem_load(struct drm_device *dev)
4229{ 4247{
4248 int i;
4230 drm_i915_private_t *dev_priv = dev->dev_private; 4249 drm_i915_private_t *dev_priv = dev->dev_private;
4231 4250
4232 spin_lock_init(&dev_priv->mm.active_list_lock); 4251 spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4246,6 +4265,18 @@ i915_gem_load(struct drm_device *dev)
4246 else 4265 else
4247 dev_priv->num_fence_regs = 8; 4266 dev_priv->num_fence_regs = 8;
4248 4267
4268 /* Initialize fence registers to zero */
4269 if (IS_I965G(dev)) {
4270 for (i = 0; i < 16; i++)
4271 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4272 } else {
4273 for (i = 0; i < 8; i++)
4274 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4275 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4276 for (i = 0; i < 8; i++)
4277 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4278 }
4279
4249 i915_gem_detect_bit_6_swizzle(dev); 4280 i915_gem_detect_bit_6_swizzle(dev);
4250} 4281}
4251 4282
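i915_gem_load() now zeroes every fence register the part has at load time. A standalone sketch of the register layout that loop implies; the FENCE_REG_* constants come from i915_reg.h and are not repeated here:

    /* Sketch: MMIO offset of fence register 'i'.  965+ has sixteen 64-bit
     * registers; pre-965 parts have eight 32-bit ones, with 945/G33 adding
     * eight more starting at FENCE_REG_945_8. */
    static u32 fence_reg_offset(struct drm_device *dev, int i)
    {
            if (IS_I965G(dev))
                    return FENCE_REG_965_0 + i * 8;
            if (i < 8)
                    return FENCE_REG_830_0 + i * 4;
            return FENCE_REG_945_8 + (i - 8) * 4;   /* 945/G33 only */
    }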
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8d0b943e2c5a..e602614bd3f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
87 chunk_len = page_len - chunk; 87 chunk_len = page_len - chunk;
88 if (chunk_len > 128) 88 if (chunk_len > 128)
89 chunk_len = 128; 89 chunk_len = 128;
90 i915_gem_dump_page(obj_priv->page_list[page], 90 i915_gem_dump_page(obj_priv->pages[page],
91 chunk, chunk + chunk_len, 91 chunk, chunk + chunk_len,
92 obj_priv->gtt_offset + 92 obj_priv->gtt_offset +
93 page * PAGE_SIZE, 93 page * PAGE_SIZE,
@@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
143 uint32_t *backing_map = NULL; 143 uint32_t *backing_map = NULL;
144 int bad_count = 0; 144 int bad_count = 0;
145 145
146 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", 146 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
147 __func__, obj, obj_priv->gtt_offset, handle, 147 __func__, obj, obj_priv->gtt_offset, handle,
148 obj->size / 1024); 148 obj->size / 1024);
149 149
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
157 for (page = 0; page < obj->size / PAGE_SIZE; page++) { 157 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
158 int i; 158 int i;
159 159
160 backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); 160 backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
161 161
162 if (backing_map == NULL) { 162 if (backing_map == NULL) {
163 DRM_ERROR("failed to map backing page\n"); 163 DRM_ERROR("failed to map backing page\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 28146e405e87..cb3b97405fbf 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -75,11 +75,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
75 case ACTIVE_LIST: 75 case ACTIVE_LIST:
76 seq_printf(m, "Active:\n"); 76 seq_printf(m, "Active:\n");
77 lock = &dev_priv->mm.active_list_lock; 77 lock = &dev_priv->mm.active_list_lock;
78 spin_lock(lock);
79 head = &dev_priv->mm.active_list; 78 head = &dev_priv->mm.active_list;
80 break; 79 break;
81 case INACTIVE_LIST: 80 case INACTIVE_LIST:
82 seq_printf(m, "Inctive:\n"); 81 seq_printf(m, "Inactive:\n");
83 head = &dev_priv->mm.inactive_list; 82 head = &dev_priv->mm.inactive_list;
84 break; 83 break;
85 case FLUSHING_LIST: 84 case FLUSHING_LIST:
@@ -91,6 +90,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
91 return 0; 90 return 0;
92 } 91 }
93 92
93 if (lock)
94 spin_lock(lock);
94 list_for_each_entry(obj_priv, head, list) 95 list_for_each_entry(obj_priv, head, list)
95 { 96 {
96 struct drm_gem_object *obj = obj_priv->obj; 97 struct drm_gem_object *obj = obj_priv->obj;
@@ -104,7 +105,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
104 if (obj->name) 105 if (obj->name)
105 seq_printf(m, " (name: %d)", obj->name); 106 seq_printf(m, " (name: %d)", obj->name);
106 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 107 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
107 seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg); 108 seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
109 if (obj_priv->gtt_space != NULL)
110 seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
111
108 seq_printf(m, "\n"); 112 seq_printf(m, "\n");
109 } 113 }
110 114
@@ -323,6 +327,41 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
323 return 0; 327 return 0;
324} 328}
325 329
330static int i915_error_state(struct seq_file *m, void *unused)
331{
332 struct drm_info_node *node = (struct drm_info_node *) m->private;
333 struct drm_device *dev = node->minor->dev;
334 drm_i915_private_t *dev_priv = dev->dev_private;
335 struct drm_i915_error_state *error;
336 unsigned long flags;
337
338 spin_lock_irqsave(&dev_priv->error_lock, flags);
339 if (!dev_priv->first_error) {
340 seq_printf(m, "no error state collected\n");
341 goto out;
342 }
343
344 error = dev_priv->first_error;
345
346 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
347 error->time.tv_usec);
348 seq_printf(m, "EIR: 0x%08x\n", error->eir);
349 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
350 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
351 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
352 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
353 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
354 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
355 if (IS_I965G(dev)) {
356 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
357 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
358 }
359
360out:
361 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
362
363 return 0;
364}
326 365
327static struct drm_info_list i915_gem_debugfs_list[] = { 366static struct drm_info_list i915_gem_debugfs_list[] = {
328 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 367 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
@@ -336,6 +375,7 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
336 {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, 375 {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
337 {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 376 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
338 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 377 {"i915_batchbuffers", i915_batchbuffer_info, 0},
378 {"i915_error_state", i915_error_state, 0},
339}; 379};
340#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) 380#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
341 381
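The object-list dump now takes the active-list spinlock after the list has been selected and prints each object's GTT offset when it is bound. A condensed sketch of the conditional-lock walk, with the matching unlock (which falls outside the hunk shown) assumed:

    spinlock_t *lock = NULL;
    struct list_head *head;

    switch (list) {
    case ACTIVE_LIST:
            lock = &dev_priv->mm.active_list_lock;  /* only this list has its own lock */
            head = &dev_priv->mm.active_list;
            break;
    case INACTIVE_LIST:
            head = &dev_priv->mm.inactive_list;
            break;
    default:
            return 0;                               /* nothing locked on this path */
    }

    if (lock)
            spin_lock(lock);
    list_for_each_entry(obj_priv, head, list) {
            /* ... one line per object, as in i915_gem_object_list_info() ... */
    }
    if (lock)
            spin_unlock(lock);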
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 5c1ceec49f5b..a2d527b22ec4 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
114 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 114 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
115 115
116 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 116 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
117#ifdef CONFIG_PNP
117 if (mchbar_addr && 118 if (mchbar_addr &&
118 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { 119 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
119 ret = 0; 120 ret = 0;
120 goto out_put; 121 goto out_put;
121 } 122 }
123#endif
122 124
123 /* Get some space for it */ 125 /* Get some space for it */
124 ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, 126 ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
@@ -519,6 +521,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
519 goto err; 521 goto err;
520 } 522 }
521 523
524 /* If we've changed tiling, GTT-mappings of the object
525 * need to re-fault to ensure that the correct fence register
526 * setup is in place.
527 */
528 i915_gem_release_mmap(obj);
529
522 obj_priv->tiling_mode = args->tiling_mode; 530 obj_priv->tiling_mode = args->tiling_mode;
523 obj_priv->stride = args->stride; 531 obj_priv->stride = args->stride;
524 } 532 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b86b7b7130c6..83aee80e77a6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,7 @@
26 * 26 *
27 */ 27 */
28 28
29#include <linux/sysrq.h>
29#include "drmP.h" 30#include "drmP.h"
30#include "drm.h" 31#include "drm.h"
31#include "i915_drm.h" 32#include "i915_drm.h"
@@ -41,9 +42,10 @@
41 * we leave them always unmasked in IMR and then control enabling them through 42 * we leave them always unmasked in IMR and then control enabling them through
42 * PIPESTAT alone. 43 * PIPESTAT alone.
43 */ 44 */
44#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ 45#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
45 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ 46 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
46 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) 47 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
48 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
47 49
48/** Interrupts that we mask and unmask at runtime. */ 50/** Interrupts that we mask and unmask at runtime. */
49#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) 51#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -232,7 +234,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
232 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 234 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
233 hotplug_work); 235 hotplug_work);
234 struct drm_device *dev = dev_priv->dev; 236 struct drm_device *dev = dev_priv->dev;
235 237 struct drm_mode_config *mode_config = &dev->mode_config;
238 struct drm_connector *connector;
239
240 if (mode_config->num_connector) {
241 list_for_each_entry(connector, &mode_config->connector_list, head) {
242 struct intel_output *intel_output = to_intel_output(connector);
243
244 if (intel_output->hot_plug)
245 (*intel_output->hot_plug) (intel_output);
246 }
247 }
236 /* Just fire off a uevent and let userspace tell us what to do */ 248 /* Just fire off a uevent and let userspace tell us what to do */
237 drm_sysfs_hotplug_event(dev); 249 drm_sysfs_hotplug_event(dev);
238} 250}
@@ -278,6 +290,201 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
278 return ret; 290 return ret;
279} 291}
280 292
293/**
294 * i915_error_work_func - do process context error handling work
295 * @work: work struct
296 *
297 * Fire an error uevent so userspace can see that a hang or error
298 * was detected.
299 */
300static void i915_error_work_func(struct work_struct *work)
301{
302 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
303 error_work);
304 struct drm_device *dev = dev_priv->dev;
305 char *event_string = "ERROR=1";
306 char *envp[] = { event_string, NULL };
307
308 DRM_DEBUG("generating error event\n");
309
310 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
311}
312
313/**
314 * i915_capture_error_state - capture an error record for later analysis
315 * @dev: drm device
316 *
317 * Should be called when an error is detected (either a hang or an error
318 * interrupt) to capture error state from the time of the error. Fills
319 * out a structure which becomes available in debugfs for user level tools
320 * to pick up.
321 */
322static void i915_capture_error_state(struct drm_device *dev)
323{
324 struct drm_i915_private *dev_priv = dev->dev_private;
325 struct drm_i915_error_state *error;
326 unsigned long flags;
327
328 spin_lock_irqsave(&dev_priv->error_lock, flags);
329 if (dev_priv->first_error)
330 goto out;
331
332 error = kmalloc(sizeof(*error), GFP_ATOMIC);
333 if (!error) {
334 DRM_DEBUG("out of memory, not capturing error state\n");
335 goto out;
336 }
337
338 error->eir = I915_READ(EIR);
339 error->pgtbl_er = I915_READ(PGTBL_ER);
340 error->pipeastat = I915_READ(PIPEASTAT);
341 error->pipebstat = I915_READ(PIPEBSTAT);
342 error->instpm = I915_READ(INSTPM);
343 if (!IS_I965G(dev)) {
344 error->ipeir = I915_READ(IPEIR);
345 error->ipehr = I915_READ(IPEHR);
346 error->instdone = I915_READ(INSTDONE);
347 error->acthd = I915_READ(ACTHD);
348 } else {
349 error->ipeir = I915_READ(IPEIR_I965);
350 error->ipehr = I915_READ(IPEHR_I965);
351 error->instdone = I915_READ(INSTDONE_I965);
352 error->instps = I915_READ(INSTPS);
353 error->instdone1 = I915_READ(INSTDONE1);
354 error->acthd = I915_READ(ACTHD_I965);
355 }
356
357 do_gettimeofday(&error->time);
358
359 dev_priv->first_error = error;
360
361out:
362 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
363}
364
365/**
366 * i915_handle_error - handle an error interrupt
367 * @dev: drm device
368 *
369 * Do some basic checking of register state at error interrupt time and
370 * dump it to the syslog. Also call i915_capture_error_state() to make
371 * sure we get a record and make it available in debugfs. Fire a uevent
372 * so userspace knows something bad happened (should trigger collection
373 * of a ring dump etc.).
374 */
375static void i915_handle_error(struct drm_device *dev)
376{
377 struct drm_i915_private *dev_priv = dev->dev_private;
378 u32 eir = I915_READ(EIR);
379 u32 pipea_stats = I915_READ(PIPEASTAT);
380 u32 pipeb_stats = I915_READ(PIPEBSTAT);
381
382 i915_capture_error_state(dev);
383
384 printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
385 eir);
386
387 if (IS_G4X(dev)) {
388 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
389 u32 ipeir = I915_READ(IPEIR_I965);
390
391 printk(KERN_ERR " IPEIR: 0x%08x\n",
392 I915_READ(IPEIR_I965));
393 printk(KERN_ERR " IPEHR: 0x%08x\n",
394 I915_READ(IPEHR_I965));
395 printk(KERN_ERR " INSTDONE: 0x%08x\n",
396 I915_READ(INSTDONE_I965));
397 printk(KERN_ERR " INSTPS: 0x%08x\n",
398 I915_READ(INSTPS));
399 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
400 I915_READ(INSTDONE1));
401 printk(KERN_ERR " ACTHD: 0x%08x\n",
402 I915_READ(ACTHD_I965));
403 I915_WRITE(IPEIR_I965, ipeir);
404 (void)I915_READ(IPEIR_I965);
405 }
406 if (eir & GM45_ERROR_PAGE_TABLE) {
407 u32 pgtbl_err = I915_READ(PGTBL_ER);
408 printk(KERN_ERR "page table error\n");
409 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
410 pgtbl_err);
411 I915_WRITE(PGTBL_ER, pgtbl_err);
412 (void)I915_READ(PGTBL_ER);
413 }
414 }
415
416 if (IS_I9XX(dev)) {
417 if (eir & I915_ERROR_PAGE_TABLE) {
418 u32 pgtbl_err = I915_READ(PGTBL_ER);
419 printk(KERN_ERR "page table error\n");
420 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
421 pgtbl_err);
422 I915_WRITE(PGTBL_ER, pgtbl_err);
423 (void)I915_READ(PGTBL_ER);
424 }
425 }
426
427 if (eir & I915_ERROR_MEMORY_REFRESH) {
428 printk(KERN_ERR "memory refresh error\n");
429 printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
430 pipea_stats);
431 printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
432 pipeb_stats);
433 /* pipestat has already been acked */
434 }
435 if (eir & I915_ERROR_INSTRUCTION) {
436 printk(KERN_ERR "instruction error\n");
437 printk(KERN_ERR " INSTPM: 0x%08x\n",
438 I915_READ(INSTPM));
439 if (!IS_I965G(dev)) {
440 u32 ipeir = I915_READ(IPEIR);
441
442 printk(KERN_ERR " IPEIR: 0x%08x\n",
443 I915_READ(IPEIR));
444 printk(KERN_ERR " IPEHR: 0x%08x\n",
445 I915_READ(IPEHR));
446 printk(KERN_ERR " INSTDONE: 0x%08x\n",
447 I915_READ(INSTDONE));
448 printk(KERN_ERR " ACTHD: 0x%08x\n",
449 I915_READ(ACTHD));
450 I915_WRITE(IPEIR, ipeir);
451 (void)I915_READ(IPEIR);
452 } else {
453 u32 ipeir = I915_READ(IPEIR_I965);
454
455 printk(KERN_ERR " IPEIR: 0x%08x\n",
456 I915_READ(IPEIR_I965));
457 printk(KERN_ERR " IPEHR: 0x%08x\n",
458 I915_READ(IPEHR_I965));
459 printk(KERN_ERR " INSTDONE: 0x%08x\n",
460 I915_READ(INSTDONE_I965));
461 printk(KERN_ERR " INSTPS: 0x%08x\n",
462 I915_READ(INSTPS));
463 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
464 I915_READ(INSTDONE1));
465 printk(KERN_ERR " ACTHD: 0x%08x\n",
466 I915_READ(ACTHD_I965));
467 I915_WRITE(IPEIR_I965, ipeir);
468 (void)I915_READ(IPEIR_I965);
469 }
470 }
471
472 I915_WRITE(EIR, eir);
473 (void)I915_READ(EIR);
474 eir = I915_READ(EIR);
475 if (eir) {
476 /*
477 * some errors might have become stuck,
478 * mask them.
479 */
480 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
481 I915_WRITE(EMR, I915_READ(EMR) | eir);
482 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
483 }
484
485 queue_work(dev_priv->wq, &dev_priv->error_work);
486}
487
281irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 488irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
282{ 489{
283 struct drm_device *dev = (struct drm_device *) arg; 490 struct drm_device *dev = (struct drm_device *) arg;
@@ -319,15 +526,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
319 pipea_stats = I915_READ(PIPEASTAT); 526 pipea_stats = I915_READ(PIPEASTAT);
320 pipeb_stats = I915_READ(PIPEBSTAT); 527 pipeb_stats = I915_READ(PIPEBSTAT);
321 528
529 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
530 i915_handle_error(dev);
531
322 /* 532 /*
323 * Clear the PIPE(A|B)STAT regs before the IIR 533 * Clear the PIPE(A|B)STAT regs before the IIR
324 */ 534 */
325 if (pipea_stats & 0x8000ffff) { 535 if (pipea_stats & 0x8000ffff) {
536 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
537 DRM_DEBUG("pipe a underrun\n");
326 I915_WRITE(PIPEASTAT, pipea_stats); 538 I915_WRITE(PIPEASTAT, pipea_stats);
327 irq_received = 1; 539 irq_received = 1;
328 } 540 }
329 541
330 if (pipeb_stats & 0x8000ffff) { 542 if (pipeb_stats & 0x8000ffff) {
543 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
544 DRM_DEBUG("pipe b underrun\n");
331 I915_WRITE(PIPEBSTAT, pipeb_stats); 545 I915_WRITE(PIPEBSTAT, pipeb_stats);
332 irq_received = 1; 546 irq_received = 1;
333 } 547 }
@@ -346,7 +560,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
346 DRM_DEBUG("hotplug event received, stat 0x%08x\n", 560 DRM_DEBUG("hotplug event received, stat 0x%08x\n",
347 hotplug_status); 561 hotplug_status);
348 if (hotplug_status & dev_priv->hotplug_supported_mask) 562 if (hotplug_status & dev_priv->hotplug_supported_mask)
349 schedule_work(&dev_priv->hotplug_work); 563 queue_work(dev_priv->wq,
564 &dev_priv->hotplug_work);
350 565
351 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 566 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
352 I915_READ(PORT_HOTPLUG_STAT); 567 I915_READ(PORT_HOTPLUG_STAT);
@@ -699,6 +914,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
699 atomic_set(&dev_priv->irq_received, 0); 914 atomic_set(&dev_priv->irq_received, 0);
700 915
701 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 916 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
917 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
702 918
703 if (IS_IGDNG(dev)) { 919 if (IS_IGDNG(dev)) {
704 igdng_irq_preinstall(dev); 920 igdng_irq_preinstall(dev);
@@ -722,6 +938,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
722{ 938{
723 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 939 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
724 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 940 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
941 u32 error_mask;
725 942
726 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 943 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
727 944
@@ -758,6 +975,21 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
758 i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); 975 i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
759 } 976 }
760 977
978 /*
979 * Enable some error detection; note that the instruction error mask
980 * bit is reserved, so we leave it masked.
981 */
982 if (IS_G4X(dev)) {
983 error_mask = ~(GM45_ERROR_PAGE_TABLE |
984 GM45_ERROR_MEM_PRIV |
985 GM45_ERROR_CP_PRIV |
986 I915_ERROR_MEMORY_REFRESH);
987 } else {
988 error_mask = ~(I915_ERROR_PAGE_TABLE |
989 I915_ERROR_MEMORY_REFRESH);
990 }
991 I915_WRITE(EMR, error_mask);
992
761 /* Disable pipe interrupt enables, clear pending pipe status */ 993 /* Disable pipe interrupt enables, clear pending pipe status */
762 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); 994 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
763 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); 995 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
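i915_handle_error() reports EIR bits whose meaning differs between G4X and older parts (bit 4 is GM45_ERROR_MEM_PRIV on G4X but I915_ERROR_PAGE_TABLE elsewhere). A small standalone sketch of a simplified decode, using the bit values this patch adds to i915_reg.h:

    #include <stdio.h>

    #define GM45_ERROR_PAGE_TABLE      (1 << 5)
    #define GM45_ERROR_MEM_PRIV        (1 << 4)
    #define I915_ERROR_PAGE_TABLE      (1 << 4)
    #define GM45_ERROR_CP_PRIV         (1 << 3)
    #define I915_ERROR_MEMORY_REFRESH  (1 << 1)
    #define I915_ERROR_INSTRUCTION     (1 << 0)

    /* Sketch: simplified classification of an EIR value. */
    static void decode_eir(unsigned int eir, int is_g4x)
    {
            if (is_g4x && (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)))
                    printf("  privilege violation (memory/command parser)\n");
            if (eir & (is_g4x ? GM45_ERROR_PAGE_TABLE : I915_ERROR_PAGE_TABLE))
                    printf("  page table error\n");
            if (eir & I915_ERROR_MEMORY_REFRESH)
                    printf("  memory refresh error\n");
            if (eir & I915_ERROR_INSTRUCTION)
                    printf("  instruction error\n");
    }

    int main(void)
    {
            decode_eir(I915_ERROR_PAGE_TABLE | I915_ERROR_INSTRUCTION, 0);
            return 0;
    }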
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f6237a0b1133..2955083aa471 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -206,6 +206,7 @@
206/* 206/*
207 * Instruction and interrupt control regs 207 * Instruction and interrupt control regs
208 */ 208 */
209#define PGTBL_ER 0x02024
209#define PRB0_TAIL 0x02030 210#define PRB0_TAIL 0x02030
210#define PRB0_HEAD 0x02034 211#define PRB0_HEAD 0x02034
211#define PRB0_START 0x02038 212#define PRB0_START 0x02038
@@ -226,11 +227,18 @@
226#define PRB1_HEAD 0x02044 /* 915+ only */ 227#define PRB1_HEAD 0x02044 /* 915+ only */
227#define PRB1_START 0x02048 /* 915+ only */ 228#define PRB1_START 0x02048 /* 915+ only */
228#define PRB1_CTL 0x0204c /* 915+ only */ 229#define PRB1_CTL 0x0204c /* 915+ only */
230#define IPEIR_I965 0x02064
231#define IPEHR_I965 0x02068
232#define INSTDONE_I965 0x0206c
233#define INSTPS 0x02070 /* 965+ only */
234#define INSTDONE1 0x0207c /* 965+ only */
229#define ACTHD_I965 0x02074 235#define ACTHD_I965 0x02074
230#define HWS_PGA 0x02080 236#define HWS_PGA 0x02080
231#define HWS_ADDRESS_MASK 0xfffff000 237#define HWS_ADDRESS_MASK 0xfffff000
232#define HWS_START_ADDRESS_SHIFT 4 238#define HWS_START_ADDRESS_SHIFT 4
233#define IPEIR 0x02088 239#define IPEIR 0x02088
240#define IPEHR 0x0208c
241#define INSTDONE 0x02090
234#define NOPID 0x02094 242#define NOPID 0x02094
235#define HWSTAM 0x02098 243#define HWSTAM 0x02098
236#define SCPD0 0x0209c /* 915+ only */ 244#define SCPD0 0x0209c /* 915+ only */
@@ -258,10 +266,22 @@
258#define EIR 0x020b0 266#define EIR 0x020b0
259#define EMR 0x020b4 267#define EMR 0x020b4
260#define ESR 0x020b8 268#define ESR 0x020b8
269#define GM45_ERROR_PAGE_TABLE (1<<5)
270#define GM45_ERROR_MEM_PRIV (1<<4)
271#define I915_ERROR_PAGE_TABLE (1<<4)
272#define GM45_ERROR_CP_PRIV (1<<3)
273#define I915_ERROR_MEMORY_REFRESH (1<<1)
274#define I915_ERROR_INSTRUCTION (1<<0)
261#define INSTPM 0x020c0 275#define INSTPM 0x020c0
262#define ACTHD 0x020c8 276#define ACTHD 0x020c8
263#define FW_BLC 0x020d8 277#define FW_BLC 0x020d8
278#define FW_BLC2 0x020dc
264#define FW_BLC_SELF 0x020e0 /* 915+ only */ 279#define FW_BLC_SELF 0x020e0 /* 915+ only */
280#define FW_BLC_SELF_EN (1<<15)
281#define MM_BURST_LENGTH 0x00700000
282#define MM_FIFO_WATERMARK 0x0001F000
283#define LM_BURST_LENGTH 0x00000700
284#define LM_FIFO_WATERMARK 0x0000001F
265#define MI_ARB_STATE 0x020e4 /* 915+ only */ 285#define MI_ARB_STATE 0x020e4 /* 915+ only */
266#define CACHE_MODE_0 0x02120 /* 915+ only */ 286#define CACHE_MODE_0 0x02120 /* 915+ only */
267#define CM0_MASK_SHIFT 16 287#define CM0_MASK_SHIFT 16
@@ -569,6 +589,23 @@
569#define C0DRB3 0x10206 589#define C0DRB3 0x10206
570#define C1DRB3 0x10606 590#define C1DRB3 0x10606
571 591
592/* Clocking configuration register */
593#define CLKCFG 0x10c00
594#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
595#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
596#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
597#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
598#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
599#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
600/* Note: the two encodings below are guesses */
601#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
602#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */
603#define CLKCFG_FSB_MASK (7 << 0)
604#define CLKCFG_MEM_533 (1 << 4)
605#define CLKCFG_MEM_667 (2 << 4)
606#define CLKCFG_MEM_800 (3 << 4)
607#define CLKCFG_MEM_MASK (7 << 4)
608
572/** GM965 GM45 render standby register */ 609/** GM965 GM45 render standby register */
573#define MCHBAR_RENDER_STANDBY 0x111B8 610#define MCHBAR_RENDER_STANDBY 0x111B8
574 611
@@ -834,9 +871,25 @@
834#define HORIZ_INTERP_MASK (3 << 6) 871#define HORIZ_INTERP_MASK (3 << 6)
835#define HORIZ_AUTO_SCALE (1 << 5) 872#define HORIZ_AUTO_SCALE (1 << 5)
836#define PANEL_8TO6_DITHER_ENABLE (1 << 3) 873#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
874#define PFIT_FILTER_FUZZY (0 << 24)
875#define PFIT_SCALING_AUTO (0 << 26)
876#define PFIT_SCALING_PROGRAMMED (1 << 26)
877#define PFIT_SCALING_PILLAR (2 << 26)
878#define PFIT_SCALING_LETTER (3 << 26)
837#define PFIT_PGM_RATIOS 0x61234 879#define PFIT_PGM_RATIOS 0x61234
838#define PFIT_VERT_SCALE_MASK 0xfff00000 880#define PFIT_VERT_SCALE_MASK 0xfff00000
839#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 881#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
882/* Pre-965 */
883#define PFIT_VERT_SCALE_SHIFT 20
884#define PFIT_VERT_SCALE_MASK 0xfff00000
885#define PFIT_HORIZ_SCALE_SHIFT 4
886#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
887/* 965+ */
888#define PFIT_VERT_SCALE_SHIFT_965 16
889#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
890#define PFIT_HORIZ_SCALE_SHIFT_965 0
891#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
892
840#define PFIT_AUTO_RATIOS 0x61238 893#define PFIT_AUTO_RATIOS 0x61238
841 894
842/* Backlight control */ 895/* Backlight control */
@@ -1342,6 +1395,7 @@
1342#define TV_V_CHROMA_42 0x684a8 1395#define TV_V_CHROMA_42 0x684a8
1343 1396
1344/* Display Port */ 1397/* Display Port */
1398#define DP_A 0x64000 /* eDP */
1345#define DP_B 0x64100 1399#define DP_B 0x64100
1346#define DP_C 0x64200 1400#define DP_C 0x64200
1347#define DP_D 0x64300 1401#define DP_D 0x64300
@@ -1384,13 +1438,22 @@
1384/* Mystic DPCD version 1.1 special mode */ 1438/* Mystic DPCD version 1.1 special mode */
1385#define DP_ENHANCED_FRAMING (1 << 18) 1439#define DP_ENHANCED_FRAMING (1 << 18)
1386 1440
1441/* eDP */
1442#define DP_PLL_FREQ_270MHZ (0 << 16)
1443#define DP_PLL_FREQ_160MHZ (1 << 16)
1444#define DP_PLL_FREQ_MASK (3 << 16)
1445
1387/** locked once port is enabled */ 1446/** locked once port is enabled */
1388#define DP_PORT_REVERSAL (1 << 15) 1447#define DP_PORT_REVERSAL (1 << 15)
1389 1448
1449/* eDP */
1450#define DP_PLL_ENABLE (1 << 14)
1451
1390/** sends the clock on lane 15 of the PEG for debug */ 1452/** sends the clock on lane 15 of the PEG for debug */
1391#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) 1453#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
1392 1454
1393#define DP_SCRAMBLING_DISABLE (1 << 12) 1455#define DP_SCRAMBLING_DISABLE (1 << 12)
1456#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7)
1394 1457
1395/** limit RGB values to avoid confusing TVs */ 1458/** limit RGB values to avoid confusing TVs */
1396#define DP_COLOR_RANGE_16_235 (1 << 8) 1459#define DP_COLOR_RANGE_16_235 (1 << 8)
@@ -1410,6 +1473,13 @@
1410 * is 20 bytes in each direction, hence the 5 fixed 1473 * is 20 bytes in each direction, hence the 5 fixed
1411 * data registers 1474 * data registers
1412 */ 1475 */
1476#define DPA_AUX_CH_CTL 0x64010
1477#define DPA_AUX_CH_DATA1 0x64014
1478#define DPA_AUX_CH_DATA2 0x64018
1479#define DPA_AUX_CH_DATA3 0x6401c
1480#define DPA_AUX_CH_DATA4 0x64020
1481#define DPA_AUX_CH_DATA5 0x64024
1482
1413#define DPB_AUX_CH_CTL 0x64110 1483#define DPB_AUX_CH_CTL 0x64110
1414#define DPB_AUX_CH_DATA1 0x64114 1484#define DPB_AUX_CH_DATA1 0x64114
1415#define DPB_AUX_CH_DATA2 0x64118 1485#define DPB_AUX_CH_DATA2 0x64118
@@ -1552,6 +1622,34 @@
1552#define DSPARB_CSTART_SHIFT 7 1622#define DSPARB_CSTART_SHIFT 7
1553#define DSPARB_BSTART_MASK (0x7f) 1623#define DSPARB_BSTART_MASK (0x7f)
1554#define DSPARB_BSTART_SHIFT 0 1624#define DSPARB_BSTART_SHIFT 0
1625#define DSPARB_BEND_SHIFT 9 /* on 855 */
1626#define DSPARB_AEND_SHIFT 0
1627
1628#define DSPFW1 0x70034
1629#define DSPFW2 0x70038
1630#define DSPFW3 0x7003c
1631#define IGD_SELF_REFRESH_EN (1<<30)
1632
1633/* FIFO watermark sizes etc */
1634#define I915_FIFO_LINE_SIZE 64
1635#define I830_FIFO_LINE_SIZE 32
1636#define I945_FIFO_SIZE 127 /* 945 & 965 */
1637#define I915_FIFO_SIZE 95
1638#define I855GM_FIFO_SIZE 127 /* In cachelines */
1639#define I830_FIFO_SIZE 95
1640#define I915_MAX_WM 0x3f
1641
1642#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
1643#define IGD_FIFO_LINE_SIZE 64
1644#define IGD_MAX_WM 0x1ff
1645#define IGD_DFT_WM 0x3f
1646#define IGD_DFT_HPLLOFF_WM 0
1647#define IGD_GUARD_WM 10
1648#define IGD_CURSOR_FIFO 64
1649#define IGD_CURSOR_MAX_WM 0x3f
1650#define IGD_CURSOR_DFT_WM 0
1651#define IGD_CURSOR_GUARD_WM 5
1652
1555/* 1653/*
1556 * The two pipe frame counter registers are not synchronized, so 1654 * The two pipe frame counter registers are not synchronized, so
1557 * reading a stable value is somewhat tricky. The following code 1655 * reading a stable value is somewhat tricky. The following code
@@ -1767,6 +1865,8 @@
1767#define PFA_CTL_1 0x68080 1865#define PFA_CTL_1 0x68080
1768#define PFB_CTL_1 0x68880 1866#define PFB_CTL_1 0x68880
1769#define PF_ENABLE (1<<31) 1867#define PF_ENABLE (1<<31)
1868#define PFA_WIN_SZ 0x68074
1869#define PFB_WIN_SZ 0x68874
1770 1870
1771/* legacy palette */ 1871/* legacy palette */
1772#define LGC_PALETTE_A 0x4a000 1872#define LGC_PALETTE_A 0x4a000
@@ -2127,4 +2227,28 @@
2127#define PCH_PP_OFF_DELAYS 0xc720c 2227#define PCH_PP_OFF_DELAYS 0xc720c
2128#define PCH_PP_DIVISOR 0xc7210 2228#define PCH_PP_DIVISOR 0xc7210
2129 2229
2230#define PCH_DP_B 0xe4100
2231#define PCH_DPB_AUX_CH_CTL 0xe4110
2232#define PCH_DPB_AUX_CH_DATA1 0xe4114
2233#define PCH_DPB_AUX_CH_DATA2 0xe4118
2234#define PCH_DPB_AUX_CH_DATA3 0xe411c
2235#define PCH_DPB_AUX_CH_DATA4 0xe4120
2236#define PCH_DPB_AUX_CH_DATA5 0xe4124
2237
2238#define PCH_DP_C 0xe4200
2239#define PCH_DPC_AUX_CH_CTL 0xe4210
2240#define PCH_DPC_AUX_CH_DATA1 0xe4214
2241#define PCH_DPC_AUX_CH_DATA2 0xe4218
2242#define PCH_DPC_AUX_CH_DATA3 0xe421c
2243#define PCH_DPC_AUX_CH_DATA4 0xe4220
2244#define PCH_DPC_AUX_CH_DATA5 0xe4224
2245
2246#define PCH_DP_D 0xe4300
2247#define PCH_DPD_AUX_CH_CTL 0xe4310
2248#define PCH_DPD_AUX_CH_DATA1 0xe4314
2249#define PCH_DPD_AUX_CH_DATA2 0xe4318
2250#define PCH_DPD_AUX_CH_DATA3 0xe431c
2251#define PCH_DPD_AUX_CH_DATA4 0xe4320
2252#define PCH_DPD_AUX_CH_DATA5 0xe4324
2253
2130#endif /* _I915_REG_H_ */ 2254#endif /* _I915_REG_H_ */
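CLKCFG feeds the new fsb_freq/mem_freq fields in drm_i915_private; the code that actually fills them is not part of the hunks shown, so the sketch below only maps the register encodings to the nominal speeds named by the macros (the driver may well store different units):

    #define CLKCFG_FSB_400       (5 << 0)
    #define CLKCFG_FSB_533       (1 << 0)
    #define CLKCFG_FSB_667       (3 << 0)
    #define CLKCFG_FSB_800       (2 << 0)
    #define CLKCFG_FSB_1067      (6 << 0)
    #define CLKCFG_FSB_1333      (7 << 0)
    #define CLKCFG_FSB_1600      (4 << 0)   /* marked as a guess in the header */
    #define CLKCFG_FSB_1600_ALT  (0 << 0)   /* marked as a guess in the header */
    #define CLKCFG_FSB_MASK      (7 << 0)
    #define CLKCFG_MEM_533       (1 << 4)
    #define CLKCFG_MEM_667       (2 << 4)
    #define CLKCFG_MEM_800       (3 << 4)
    #define CLKCFG_MEM_MASK      (7 << 4)

    /* Sketch: decode CLKCFG into the nominal FSB / memory speeds. */
    static int clkcfg_fsb(unsigned int v)
    {
            switch (v & CLKCFG_FSB_MASK) {
            case CLKCFG_FSB_400:      return 400;
            case CLKCFG_FSB_533:      return 533;
            case CLKCFG_FSB_667:      return 667;
            case CLKCFG_FSB_800:      return 800;
            case CLKCFG_FSB_1067:     return 1067;
            case CLKCFG_FSB_1333:     return 1333;
            case CLKCFG_FSB_1600:
            case CLKCFG_FSB_1600_ALT: return 1600;
            default:                  return 0;
            }
    }

    static int clkcfg_mem(unsigned int v)
    {
            switch (v & CLKCFG_MEM_MASK) {
            case CLKCFG_MEM_533: return 533;
            case CLKCFG_MEM_667: return 667;
            case CLKCFG_MEM_800: return 800;
            default:             return 0;
            }
    }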
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a98e2831ed31..1d04e1904ac6 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -222,23 +222,12 @@ static void i915_restore_vga(struct drm_device *dev)
222 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); 222 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
223} 223}
224 224
225int i915_save_state(struct drm_device *dev) 225static void i915_save_modeset_reg(struct drm_device *dev)
226{ 226{
227 struct drm_i915_private *dev_priv = dev->dev_private; 227 struct drm_i915_private *dev_priv = dev->dev_private;
228 int i;
229
230 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
231
232 /* Render Standby */
233 if (IS_I965G(dev) && IS_MOBILE(dev))
234 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
235
236 /* Hardware status page */
237 dev_priv->saveHWS = I915_READ(HWS_PGA);
238
239 /* Display arbitration control */
240 dev_priv->saveDSPARB = I915_READ(DSPARB);
241 228
229 if (drm_core_check_feature(dev, DRIVER_MODESET))
230 return;
242 /* Pipe & plane A info */ 231 /* Pipe & plane A info */
243 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 232 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
244 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 233 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -294,7 +283,122 @@ int i915_save_state(struct drm_device *dev)
294 } 283 }
295 i915_save_palette(dev, PIPE_B); 284 i915_save_palette(dev, PIPE_B);
296 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 285 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
286 return;
287}
288static void i915_restore_modeset_reg(struct drm_device *dev)
289{
290 struct drm_i915_private *dev_priv = dev->dev_private;
291
292 if (drm_core_check_feature(dev, DRIVER_MODESET))
293 return;
294
295 /* Pipe & plane A info */
296 /* Prime the clock */
297 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
298 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
299 ~DPLL_VCO_ENABLE);
300 DRM_UDELAY(150);
301 }
302 I915_WRITE(FPA0, dev_priv->saveFPA0);
303 I915_WRITE(FPA1, dev_priv->saveFPA1);
304 /* Actually enable it */
305 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
306 DRM_UDELAY(150);
307 if (IS_I965G(dev))
308 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
309 DRM_UDELAY(150);
310
311 /* Restore mode */
312 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
313 I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
314 I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
315 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
316 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
317 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
318 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
319
320 /* Restore plane info */
321 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
322 I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
323 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
324 I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
325 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
326 if (IS_I965G(dev)) {
327 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
328 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
329 }
330
331 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
332
333 i915_restore_palette(dev, PIPE_A);
334 /* Enable the plane */
335 I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
336 I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
337
338 /* Pipe & plane B info */
339 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
340 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
341 ~DPLL_VCO_ENABLE);
342 DRM_UDELAY(150);
343 }
344 I915_WRITE(FPB0, dev_priv->saveFPB0);
345 I915_WRITE(FPB1, dev_priv->saveFPB1);
346 /* Actually enable it */
347 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
348 DRM_UDELAY(150);
349 if (IS_I965G(dev))
350 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
351 DRM_UDELAY(150);
352
353 /* Restore mode */
354 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
355 I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
356 I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
357 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
358 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
359 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
360 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
361
362 /* Restore plane info */
363 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
364 I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
365 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
366 I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
367 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
368 if (IS_I965G(dev)) {
369 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
370 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
371 }
372
373 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
374
375 i915_restore_palette(dev, PIPE_B);
376 /* Enable the plane */
377 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
378 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
297 379
380 return;
381}
382int i915_save_state(struct drm_device *dev)
383{
384 struct drm_i915_private *dev_priv = dev->dev_private;
385 int i;
386
387 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
388
389 /* Render Standby */
390 if (IS_I965G(dev) && IS_MOBILE(dev))
391 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
392
393 /* Hardware status page */
394 dev_priv->saveHWS = I915_READ(HWS_PGA);
395
396 /* Display arbitration control */
397 dev_priv->saveDSPARB = I915_READ(DSPARB);
398
399 /* This is only meaningful in non-KMS mode */
400 /* Don't save them in KMS mode */
401 i915_save_modeset_reg(dev);
298 /* Cursor state */ 402 /* Cursor state */
299 dev_priv->saveCURACNTR = I915_READ(CURACNTR); 403 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
300 dev_priv->saveCURAPOS = I915_READ(CURAPOS); 404 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
@@ -322,6 +426,20 @@ int i915_save_state(struct drm_device *dev)
322 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 426 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
323 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 427 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
324 428
429 /* Display Port state */
430 if (SUPPORTS_INTEGRATED_DP(dev)) {
431 dev_priv->saveDP_B = I915_READ(DP_B);
432 dev_priv->saveDP_C = I915_READ(DP_C);
433 dev_priv->saveDP_D = I915_READ(DP_D);
434 dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
435 dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
436 dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
437 dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
438 dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
439 dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
440 dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
441 dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
442 }
325 /* FIXME: save TV & SDVO state */ 443 /* FIXME: save TV & SDVO state */
326 444
327 /* FBC state */ 445 /* FBC state */
@@ -404,92 +522,21 @@ int i915_restore_state(struct drm_device *dev)
404 for (i = 0; i < 8; i++) 522 for (i = 0; i < 8; i++)
405 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 523 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
406 } 524 }
407 525
408 /* Pipe & plane A info */ 526 /* Display port ratios (must be done before clock is set) */
409 /* Prime the clock */ 527 if (SUPPORTS_INTEGRATED_DP(dev)) {
410 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 528 I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
411 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & 529 I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
412 ~DPLL_VCO_ENABLE); 530 I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
413 DRM_UDELAY(150); 531 I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
532 I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
533 I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
534 I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
535 I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
414 } 536 }
415 I915_WRITE(FPA0, dev_priv->saveFPA0); 537 /* This is only meaningful in non-KMS mode */
416 I915_WRITE(FPA1, dev_priv->saveFPA1); 538 /* Don't restore them in KMS mode */
417 /* Actually enable it */ 539 i915_restore_modeset_reg(dev);
418 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
419 DRM_UDELAY(150);
420 if (IS_I965G(dev))
421 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
422 DRM_UDELAY(150);
423
424 /* Restore mode */
425 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
426 I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
427 I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
428 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
429 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
430 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
431 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
432
433 /* Restore plane info */
434 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
435 I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
436 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
437 I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
438 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
439 if (IS_I965G(dev)) {
440 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
441 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
442 }
443
444 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
445
446 i915_restore_palette(dev, PIPE_A);
447 /* Enable the plane */
448 I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
449 I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
450
451 /* Pipe & plane B info */
452 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
453 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
454 ~DPLL_VCO_ENABLE);
455 DRM_UDELAY(150);
456 }
457 I915_WRITE(FPB0, dev_priv->saveFPB0);
458 I915_WRITE(FPB1, dev_priv->saveFPB1);
459 /* Actually enable it */
460 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
461 DRM_UDELAY(150);
462 if (IS_I965G(dev))
463 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
464 DRM_UDELAY(150);
465
466 /* Restore mode */
467 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
468 I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
469 I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
470 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
471 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
472 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
473 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
474
475 /* Restore plane info */
476 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
477 I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
478 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
479 I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
480 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
481 if (IS_I965G(dev)) {
482 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
483 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
484 }
485
486 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
487
488 i915_restore_palette(dev, PIPE_B);
489 /* Enable the plane */
490 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
491 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
492
493 /* Cursor state */ 540 /* Cursor state */
494 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); 541 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
495 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); 542 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
@@ -518,6 +565,12 @@ int i915_restore_state(struct drm_device *dev)
518 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 565 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
519 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 566 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
520 567
568 /* Display Port state */
569 if (SUPPORTS_INTEGRATED_DP(dev)) {
570 I915_WRITE(DP_B, dev_priv->saveDP_B);
571 I915_WRITE(DP_C, dev_priv->saveDP_C);
572 I915_WRITE(DP_D, dev_priv->saveDP_D);
573 }
521 /* FIXME: restore TV & SDVO state */ 574 /* FIXME: restore TV & SDVO state */
522 575
523 /* FBC info */ 576 /* FBC info */
@@ -545,7 +598,7 @@ int i915_restore_state(struct drm_device *dev)
545 598
546 for (i = 0; i < 16; i++) { 599 for (i = 0; i < 16; i++) {
547 I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); 600 I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
548 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); 601 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
549 } 602 }
550 for (i = 0; i < 3; i++) 603 for (i = 0; i < 3; i++)
551 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 604 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
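Besides splitting the UMS pipe programming into i915_save_modeset_reg()/i915_restore_modeset_reg() (both no-ops under KMS), the restore path now encodes an ordering constraint: the GMCH data/link M/N ratios are written before the pipe clocks come back, and the DP port registers only afterwards. A compressed sketch of that order, not a verbatim copy of i915_restore_state():

    if (SUPPORTS_INTEGRATED_DP(dev)) {
            /* 1. data/link ratios first, while the clocks are still off */
            I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
            /* ... remaining GMCH_DATA and DP_LINK M/N registers ... */
    }

    /* 2. legacy (UMS-only) pipe/plane/clock programming; no-op under KMS */
    i915_restore_modeset_reg(dev);

    /* 3. only then re-enable the DP ports themselves */
    if (SUPPORTS_INTEGRATED_DP(dev)) {
            I915_WRITE(DP_B, dev_priv->saveDP_B);
            I915_WRITE(DP_C, dev_priv->saveDP_C);
            I915_WRITE(DP_D, dev_priv->saveDP_D);
    }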
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index cdd126d068a7..300aee3296c2 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -99,9 +99,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
99{ 99{
100 struct bdb_lvds_options *lvds_options; 100 struct bdb_lvds_options *lvds_options;
101 struct bdb_lvds_lfp_data *lvds_lfp_data; 101 struct bdb_lvds_lfp_data *lvds_lfp_data;
102 struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
102 struct bdb_lvds_lfp_data_entry *entry; 103 struct bdb_lvds_lfp_data_entry *entry;
103 struct lvds_dvo_timing *dvo_timing; 104 struct lvds_dvo_timing *dvo_timing;
104 struct drm_display_mode *panel_fixed_mode; 105 struct drm_display_mode *panel_fixed_mode;
106 int lfp_data_size, dvo_timing_offset;
105 107
106 /* Defaults if we can't find VBT info */ 108 /* Defaults if we can't find VBT info */
107 dev_priv->lvds_dither = 0; 109 dev_priv->lvds_dither = 0;
@@ -119,10 +121,27 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
119 if (!lvds_lfp_data) 121 if (!lvds_lfp_data)
120 return; 122 return;
121 123
124 lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
125 if (!lvds_lfp_data_ptrs)
126 return;
127
122 dev_priv->lvds_vbt = 1; 128 dev_priv->lvds_vbt = 1;
123 129
124 entry = &lvds_lfp_data->data[lvds_options->panel_type]; 130 lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
125 dvo_timing = &entry->dvo_timing; 131 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
132 entry = (struct bdb_lvds_lfp_data_entry *)
133 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
134 lvds_options->panel_type));
135 dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
136 lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
137
138 /*
139 * The size of fp_timing varies across platforms, so calculate the
140 * DVO timing's offset relative to the start of the LVDS data entry
141 * to locate the DVO timing block.
142 */
143 dvo_timing = (struct lvds_dvo_timing *)
144 ((unsigned char *)entry + dvo_timing_offset);
126 145
127 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 146 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
128 147
@@ -185,10 +204,12 @@ parse_general_features(struct drm_i915_private *dev_priv,
185 dev_priv->lvds_use_ssc = general->enable_ssc; 204 dev_priv->lvds_use_ssc = general->enable_ssc;
186 205
187 if (dev_priv->lvds_use_ssc) { 206 if (dev_priv->lvds_use_ssc) {
188 if (IS_I855(dev_priv->dev)) 207 if (IS_I85X(dev_priv->dev))
189 dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; 208 dev_priv->lvds_ssc_freq =
190 else 209 general->ssc_freq ? 66 : 48;
191 dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; 210 else
211 dev_priv->lvds_ssc_freq =
212 general->ssc_freq ? 100 : 96;
192 } 213 }
193 } 214 }
194} 215}
@@ -275,6 +296,25 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
275 } 296 }
276 return; 297 return;
277} 298}
299
300static void
301parse_driver_features(struct drm_i915_private *dev_priv,
302 struct bdb_header *bdb)
303{
304 struct drm_device *dev = dev_priv->dev;
305 struct bdb_driver_features *driver;
306
307 /* set default for chips without eDP */
308 if (!SUPPORTS_EDP(dev)) {
309 dev_priv->edp_support = 0;
310 return;
311 }
312
313 driver = find_section(bdb, BDB_DRIVER_FEATURES);
314 if (driver && driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
315 dev_priv->edp_support = 1;
316}
317
278/** 318/**
279 * intel_init_bios - initialize VBIOS settings & find VBT 319 * intel_init_bios - initialize VBIOS settings & find VBT
280 * @dev: DRM device 320 * @dev: DRM device
@@ -325,6 +365,8 @@ intel_init_bios(struct drm_device *dev)
325 parse_lfp_panel_data(dev_priv, bdb); 365 parse_lfp_panel_data(dev_priv, bdb);
326 parse_sdvo_panel_data(dev_priv, bdb); 366 parse_sdvo_panel_data(dev_priv, bdb);
327 parse_sdvo_device_mapping(dev_priv, bdb); 367 parse_sdvo_device_mapping(dev_priv, bdb);
368 parse_driver_features(dev_priv, bdb);
369
328 pci_unmap_rom(pdev, bios); 370 pci_unmap_rom(pdev, bios);
329 371
330 return 0; 372 return 0;
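parse_lfp_panel_data() no longer indexes a fixed-size entry array; both the entry size and the DVO-timing offset are derived from the LFP data pointers block. A standalone sketch of that arithmetic, with the pointer struct reduced to the two fields actually used (the real bdb_lvds_lfp_data_ptrs layout carries more):

    #include <stdint.h>

    /* Reduced stand-in for one VBT LFP data pointer; offsets are in bytes. */
    struct lfp_data_ptr {
            uint16_t fp_timing_offset;
            uint16_t dvo_timing_offset;
    };

    /* Sketch: locate the DVO timing block for a given panel_type, mirroring
     * the offset computation added to parse_lfp_panel_data() above. */
    static const void *lfp_dvo_timing(const uint8_t *lfp_data,
                                      const struct lfp_data_ptr *ptrs,
                                      int panel_type)
    {
            /* distance between two consecutive entries == size of one entry */
            int entry_size = ptrs[1].dvo_timing_offset - ptrs[0].dvo_timing_offset;
            /* fp_timing size varies per platform, so compute the dvo offset
             * inside an entry from the pointer pair */
            int dvo_offset = ptrs[0].dvo_timing_offset - ptrs[0].fp_timing_offset;

            return lfp_data + entry_size * panel_type + dvo_offset;
    }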
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index fe72e1c225d8..0f8e5f69ac7a 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -381,6 +381,51 @@ struct bdb_sdvo_lvds_options {
381} __attribute__((packed)); 381} __attribute__((packed));
382 382
383 383
384#define BDB_DRIVER_FEATURE_NO_LVDS 0
385#define BDB_DRIVER_FEATURE_INT_LVDS 1
386#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
387#define BDB_DRIVER_FEATURE_EDP 3
388
389struct bdb_driver_features {
390 u8 boot_dev_algorithm:1;
391 u8 block_display_switch:1;
392 u8 allow_display_switch:1;
393 u8 hotplug_dvo:1;
394 u8 dual_view_zoom:1;
395 u8 int15h_hook:1;
396 u8 sprite_in_clone:1;
397 u8 primary_lfp_id:1;
398
399 u16 boot_mode_x;
400 u16 boot_mode_y;
401 u8 boot_mode_bpp;
402 u8 boot_mode_refresh;
403
404 u16 enable_lfp_primary:1;
405 u16 selective_mode_pruning:1;
406 u16 dual_frequency:1;
407 u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
408 u16 nt_clone_support:1;
409 u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
410 u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
411 u16 cui_aspect_scaling:1;
412 u16 preserve_aspect_ratio:1;
413 u16 sdvo_device_power_down:1;
414 u16 crt_hotplug:1;
415 u16 lvds_config:2;
416 u16 tv_hotplug:1;
417 u16 hdmi_config:2;
418
419 u8 static_display:1;
420 u8 reserved2:7;
421 u16 legacy_crt_max_x;
422 u16 legacy_crt_max_y;
423 u8 legacy_crt_max_refresh;
424
425 u8 hdmi_termination;
426 u8 custom_vbt_version;
427} __attribute__((packed));
428
384bool intel_init_bios(struct drm_device *dev); 429bool intel_init_bios(struct drm_device *dev);
385 430
386/* 431/*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6de97fc66029..4cf8e2e88a40 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -46,7 +46,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
46 46
47 temp = I915_READ(reg); 47 temp = I915_READ(reg);
48 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 48 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
49 temp |= ADPA_DAC_ENABLE; 49 temp &= ~ADPA_DAC_ENABLE;
50 50
51 switch(mode) { 51 switch(mode) {
52 case DRM_MODE_DPMS_ON: 52 case DRM_MODE_DPMS_ON:
@@ -156,6 +156,9 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
156 156
157 temp = adpa = I915_READ(PCH_ADPA); 157 temp = adpa = I915_READ(PCH_ADPA);
158 158
159 adpa &= ~ADPA_DAC_ENABLE;
160 I915_WRITE(PCH_ADPA, adpa);
161
159 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 162 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
160 163
161 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 164 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
@@ -169,13 +172,14 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
169 DRM_DEBUG("pch crt adpa 0x%x", adpa); 172 DRM_DEBUG("pch crt adpa 0x%x", adpa);
170 I915_WRITE(PCH_ADPA, adpa); 173 I915_WRITE(PCH_ADPA, adpa);
171 174
172 /* This might not be needed as not specified in spec...*/ 175 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
173 udelay(1000); 176 ;
174 177
175 /* Check the status to see if both blue and green are on now */ 178 /* Check the status to see if both blue and green are on now */
176 adpa = I915_READ(PCH_ADPA); 179 adpa = I915_READ(PCH_ADPA);
177 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) == 180 adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
178 ADPA_CRT_HOTPLUG_MONITOR_COLOR) 181 if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
182 (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
179 ret = true; 183 ret = true;
180 else 184 else
181 ret = false; 185 ret = false;
@@ -428,8 +432,34 @@ static void intel_crt_destroy(struct drm_connector *connector)
428 432
429static int intel_crt_get_modes(struct drm_connector *connector) 433static int intel_crt_get_modes(struct drm_connector *connector)
430{ 434{
435 int ret;
431 struct intel_output *intel_output = to_intel_output(connector); 436 struct intel_output *intel_output = to_intel_output(connector);
432 return intel_ddc_get_modes(intel_output); 437 struct i2c_adapter *ddcbus;
438 struct drm_device *dev = connector->dev;
439
440
441 ret = intel_ddc_get_modes(intel_output);
442 if (ret || !IS_G4X(dev))
443 goto end;
444
445 ddcbus = intel_output->ddc_bus;
446 /* Try to probe digital port for output in DVI-I -> VGA mode. */
447 intel_output->ddc_bus =
448 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
449
450 if (!intel_output->ddc_bus) {
451 intel_output->ddc_bus = ddcbus;
452 dev_printk(KERN_ERR, &connector->dev->pdev->dev,
453 "DDC bus registration failed for CRTDDC_D.\n");
454 goto end;
455 }
456 /* Try to get modes by GPIOD port */
457 ret = intel_ddc_get_modes(intel_output);
458 intel_i2c_destroy(ddcbus);
459
460end:
461 return ret;
462
433} 463}
434 464
435static int intel_crt_set_property(struct drm_connector *connector, 465static int intel_crt_set_property(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3e1c78162119..d6fce2133413 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -25,14 +25,19 @@
25 */ 25 */
26 26
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/kernel.h>
28#include "drmP.h" 29#include "drmP.h"
29#include "intel_drv.h" 30#include "intel_drv.h"
30#include "i915_drm.h" 31#include "i915_drm.h"
31#include "i915_drv.h" 32#include "i915_drv.h"
33#include "intel_dp.h"
32 34
33#include "drm_crtc_helper.h" 35#include "drm_crtc_helper.h"
34 36
37#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
38
35bool intel_pipe_has_type (struct drm_crtc *crtc, int type); 39bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
40static void intel_update_watermarks(struct drm_device *dev);
36 41
37typedef struct { 42typedef struct {
38 /* given values */ 43 /* given values */
@@ -85,7 +90,7 @@ struct intel_limit {
85#define I8XX_P2_SLOW 4 90#define I8XX_P2_SLOW 4
86#define I8XX_P2_FAST 2 91#define I8XX_P2_FAST 2
87#define I8XX_P2_LVDS_SLOW 14 92#define I8XX_P2_LVDS_SLOW 14
88#define I8XX_P2_LVDS_FAST 14 /* No fast option */ 93#define I8XX_P2_LVDS_FAST 7
89#define I8XX_P2_SLOW_LIMIT 165000 94#define I8XX_P2_SLOW_LIMIT 165000
90 95
91#define I9XX_DOT_MIN 20000 96#define I9XX_DOT_MIN 20000
@@ -127,19 +132,6 @@ struct intel_limit {
127#define I9XX_P2_LVDS_FAST 7 132#define I9XX_P2_LVDS_FAST 7
128#define I9XX_P2_LVDS_SLOW_LIMIT 112000 133#define I9XX_P2_LVDS_SLOW_LIMIT 112000
129 134
130#define INTEL_LIMIT_I8XX_DVO_DAC 0
131#define INTEL_LIMIT_I8XX_LVDS 1
132#define INTEL_LIMIT_I9XX_SDVO_DAC 2
133#define INTEL_LIMIT_I9XX_LVDS 3
134#define INTEL_LIMIT_G4X_SDVO 4
135#define INTEL_LIMIT_G4X_HDMI_DAC 5
136#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9
140#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
141#define INTEL_LIMIT_IGDNG_LVDS 11
142
143/*The parameter is for SDVO on G4x platform*/ 135/*The parameter is for SDVO on G4x platform*/
144#define G4X_DOT_SDVO_MIN 25000 136#define G4X_DOT_SDVO_MIN 25000
145#define G4X_DOT_SDVO_MAX 270000 137#define G4X_DOT_SDVO_MAX 270000
@@ -218,6 +210,25 @@ struct intel_limit {
218#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 210#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
219#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 211#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
220 212
213/*The parameter is for DISPLAY PORT on G4x platform*/
214#define G4X_DOT_DISPLAY_PORT_MIN 161670
215#define G4X_DOT_DISPLAY_PORT_MAX 227000
216#define G4X_N_DISPLAY_PORT_MIN 1
217#define G4X_N_DISPLAY_PORT_MAX 2
218#define G4X_M_DISPLAY_PORT_MIN 97
219#define G4X_M_DISPLAY_PORT_MAX 108
220#define G4X_M1_DISPLAY_PORT_MIN 0x10
221#define G4X_M1_DISPLAY_PORT_MAX 0x12
222#define G4X_M2_DISPLAY_PORT_MIN 0x05
223#define G4X_M2_DISPLAY_PORT_MAX 0x06
224#define G4X_P_DISPLAY_PORT_MIN 10
225#define G4X_P_DISPLAY_PORT_MAX 20
226#define G4X_P1_DISPLAY_PORT_MIN 1
227#define G4X_P1_DISPLAY_PORT_MAX 2
228#define G4X_P2_DISPLAY_PORT_SLOW 10
229#define G4X_P2_DISPLAY_PORT_FAST 10
230#define G4X_P2_DISPLAY_PORT_LIMIT 0
231
221/* IGDNG */ 232/* IGDNG */
222/* as we calculate clock using (register_value + 2) for 233/* as we calculate clock using (register_value + 2) for
223 N/M1/M2, so here the range value for them is (actual_value-2). 234 N/M1/M2, so here the range value for them is (actual_value-2).
@@ -256,8 +267,14 @@ static bool
256intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 267intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 int target, int refclk, intel_clock_t *best_clock); 268 int target, int refclk, intel_clock_t *best_clock);
258 269
259static const intel_limit_t intel_limits[] = { 270static bool
260 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 271intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
272 int target, int refclk, intel_clock_t *best_clock);
273static bool
274intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc,
275 int target, int refclk, intel_clock_t *best_clock);
276
277static const intel_limit_t intel_limits_i8xx_dvo = {
261 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 278 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
262 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 279 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
263 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 280 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -269,8 +286,9 @@ static const intel_limit_t intel_limits[] = {
269 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 286 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
270 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 287 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
271 .find_pll = intel_find_best_PLL, 288 .find_pll = intel_find_best_PLL,
272 }, 289};
273 { /* INTEL_LIMIT_I8XX_LVDS */ 290
291static const intel_limit_t intel_limits_i8xx_lvds = {
274 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 292 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
275 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 293 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
276 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 294 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -282,8 +300,9 @@ static const intel_limit_t intel_limits[] = {
282 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 300 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
283 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 301 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
284 .find_pll = intel_find_best_PLL, 302 .find_pll = intel_find_best_PLL,
285 }, 303};
286 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 304
305static const intel_limit_t intel_limits_i9xx_sdvo = {
287 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 306 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
288 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 307 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
289 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 308 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -295,8 +314,9 @@ static const intel_limit_t intel_limits[] = {
295 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 314 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
296 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 315 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
297 .find_pll = intel_find_best_PLL, 316 .find_pll = intel_find_best_PLL,
298 }, 317};
299 { /* INTEL_LIMIT_I9XX_LVDS */ 318
319static const intel_limit_t intel_limits_i9xx_lvds = {
300 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 320 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
301 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 321 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
302 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 322 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -311,9 +331,10 @@ static const intel_limit_t intel_limits[] = {
311 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 331 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
312 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 332 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
313 .find_pll = intel_find_best_PLL, 333 .find_pll = intel_find_best_PLL,
314 }, 334};
335
315 /* below parameter and function is for G4X Chipset Family*/ 336 /* below parameter and function is for G4X Chipset Family*/
316 { /* INTEL_LIMIT_G4X_SDVO */ 337static const intel_limit_t intel_limits_g4x_sdvo = {
317 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, 338 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
318 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 339 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
319 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, 340 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
@@ -327,8 +348,9 @@ static const intel_limit_t intel_limits[] = {
327 .p2_fast = G4X_P2_SDVO_FAST 348 .p2_fast = G4X_P2_SDVO_FAST
328 }, 349 },
329 .find_pll = intel_g4x_find_best_PLL, 350 .find_pll = intel_g4x_find_best_PLL,
330 }, 351};
331 { /* INTEL_LIMIT_G4X_HDMI_DAC */ 352
353static const intel_limit_t intel_limits_g4x_hdmi = {
332 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, 354 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
333 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 355 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
334 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, 356 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
@@ -342,8 +364,9 @@ static const intel_limit_t intel_limits[] = {
342 .p2_fast = G4X_P2_HDMI_DAC_FAST 364 .p2_fast = G4X_P2_HDMI_DAC_FAST
343 }, 365 },
344 .find_pll = intel_g4x_find_best_PLL, 366 .find_pll = intel_g4x_find_best_PLL,
345 }, 367};
346 { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ 368
369static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
347 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, 370 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
348 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, 371 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
349 .vco = { .min = G4X_VCO_MIN, 372 .vco = { .min = G4X_VCO_MIN,
@@ -365,8 +388,9 @@ static const intel_limit_t intel_limits[] = {
365 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST 388 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
366 }, 389 },
367 .find_pll = intel_g4x_find_best_PLL, 390 .find_pll = intel_g4x_find_best_PLL,
368 }, 391};
369 { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ 392
393static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
370 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, 394 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
371 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, 395 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
372 .vco = { .min = G4X_VCO_MIN, 396 .vco = { .min = G4X_VCO_MIN,
@@ -388,8 +412,32 @@ static const intel_limit_t intel_limits[] = {
388 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST 412 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
389 }, 413 },
390 .find_pll = intel_g4x_find_best_PLL, 414 .find_pll = intel_g4x_find_best_PLL,
391 }, 415};
392 { /* INTEL_LIMIT_IGD_SDVO */ 416
417static const intel_limit_t intel_limits_g4x_display_port = {
418 .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
419 .max = G4X_DOT_DISPLAY_PORT_MAX },
420 .vco = { .min = G4X_VCO_MIN,
421 .max = G4X_VCO_MAX},
422 .n = { .min = G4X_N_DISPLAY_PORT_MIN,
423 .max = G4X_N_DISPLAY_PORT_MAX },
424 .m = { .min = G4X_M_DISPLAY_PORT_MIN,
425 .max = G4X_M_DISPLAY_PORT_MAX },
426 .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
427 .max = G4X_M1_DISPLAY_PORT_MAX },
428 .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
429 .max = G4X_M2_DISPLAY_PORT_MAX },
430 .p = { .min = G4X_P_DISPLAY_PORT_MIN,
431 .max = G4X_P_DISPLAY_PORT_MAX },
432 .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
433 .max = G4X_P1_DISPLAY_PORT_MAX},
434 .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
435 .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
436 .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
437 .find_pll = intel_find_pll_g4x_dp,
438};
439
440static const intel_limit_t intel_limits_igd_sdvo = {
393 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 441 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
394 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 442 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
395 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 443 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -401,8 +449,9 @@ static const intel_limit_t intel_limits[] = {
401 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 449 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
402 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 450 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
403 .find_pll = intel_find_best_PLL, 451 .find_pll = intel_find_best_PLL,
404 }, 452};
405 { /* INTEL_LIMIT_IGD_LVDS */ 453
454static const intel_limit_t intel_limits_igd_lvds = {
406 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 455 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
407 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 456 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
408 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 457 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -415,8 +464,9 @@ static const intel_limit_t intel_limits[] = {
415 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 464 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
416 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 465 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
417 .find_pll = intel_find_best_PLL, 466 .find_pll = intel_find_best_PLL,
418 }, 467};
419 { /* INTEL_LIMIT_IGDNG_SDVO_DAC */ 468
469static const intel_limit_t intel_limits_igdng_sdvo = {
420 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 470 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
421 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 471 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
422 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 472 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -429,8 +479,9 @@ static const intel_limit_t intel_limits[] = {
429 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, 479 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
430 .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, 480 .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
431 .find_pll = intel_igdng_find_best_PLL, 481 .find_pll = intel_igdng_find_best_PLL,
432 }, 482};
433 { /* INTEL_LIMIT_IGDNG_LVDS */ 483
484static const intel_limit_t intel_limits_igdng_lvds = {
434 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 485 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
435 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 486 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
436 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 487 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -443,16 +494,15 @@ static const intel_limit_t intel_limits[] = {
443 .p2_slow = IGDNG_P2_LVDS_SLOW, 494 .p2_slow = IGDNG_P2_LVDS_SLOW,
444 .p2_fast = IGDNG_P2_LVDS_FAST }, 495 .p2_fast = IGDNG_P2_LVDS_FAST },
445 .find_pll = intel_igdng_find_best_PLL, 496 .find_pll = intel_igdng_find_best_PLL,
446 },
447}; 497};
448 498
449static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) 499static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
450{ 500{
451 const intel_limit_t *limit; 501 const intel_limit_t *limit;
452 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 502 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
453 limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS]; 503 limit = &intel_limits_igdng_lvds;
454 else 504 else
455 limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC]; 505 limit = &intel_limits_igdng_sdvo;
456 506
457 return limit; 507 return limit;
458} 508}
@@ -467,19 +517,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
467 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 517 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
468 LVDS_CLKB_POWER_UP) 518 LVDS_CLKB_POWER_UP)
469 /* LVDS with dual channel */ 519 /* LVDS with dual channel */
470 limit = &intel_limits 520 limit = &intel_limits_g4x_dual_channel_lvds;
471 [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
472 else 521 else
473 /* LVDS with dual channel */ 522 /* LVDS with dual channel */
474 limit = &intel_limits 523 limit = &intel_limits_g4x_single_channel_lvds;
475 [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
476 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || 524 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
477 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 525 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
478 limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; 526 limit = &intel_limits_g4x_hdmi;
479 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 527 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
480 limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; 528 limit = &intel_limits_g4x_sdvo;
529 } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
530 limit = &intel_limits_g4x_display_port;
481 } else /* The option is for other outputs */ 531 } else /* The option is for other outputs */
482 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 532 limit = &intel_limits_i9xx_sdvo;
483 533
484 return limit; 534 return limit;
485} 535}
@@ -495,19 +545,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
495 limit = intel_g4x_limit(crtc); 545 limit = intel_g4x_limit(crtc);
496 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 546 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
497 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 547 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
498 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; 548 limit = &intel_limits_i9xx_lvds;
499 else 549 else
500 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 550 limit = &intel_limits_i9xx_sdvo;
501 } else if (IS_IGD(dev)) { 551 } else if (IS_IGD(dev)) {
502 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 552 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
503 limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; 553 limit = &intel_limits_igd_lvds;
504 else 554 else
505 limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; 555 limit = &intel_limits_igd_sdvo;
506 } else { 556 } else {
507 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 557 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
508 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; 558 limit = &intel_limits_i8xx_lvds;
509 else 559 else
510 limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; 560 limit = &intel_limits_i8xx_dvo;
511 } 561 }
512 return limit; 562 return limit;
513} 563}
@@ -553,6 +603,23 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
553 return false; 603 return false;
554} 604}
555 605
606struct drm_connector *
607intel_pipe_get_output (struct drm_crtc *crtc)
608{
609 struct drm_device *dev = crtc->dev;
610 struct drm_mode_config *mode_config = &dev->mode_config;
611 struct drm_connector *l_entry, *ret = NULL;
612
613 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
614 if (l_entry->encoder &&
615 l_entry->encoder->crtc == crtc) {
616 ret = l_entry;
617 break;
618 }
619 }
620 return ret;
621}
622
556#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 623#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
557/** 624/**
558 * Returns whether the given set of divisors are valid for a given refclk with 625 * Returns whether the given set of divisors are valid for a given refclk with
@@ -600,7 +667,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
600 int err = target; 667 int err = target;
601 668
602 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 669 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
603 (I915_READ(LVDS) & LVDS_PORT_EN) != 0) { 670 (I915_READ(LVDS)) != 0) {
604 /* 671 /*
605 * For LVDS, if the panel is on, just rely on its current 672 * For LVDS, if the panel is on, just rely on its current
606 * settings for dual-channel. We haven't figured out how to 673 * settings for dual-channel. We haven't figured out how to
@@ -707,6 +774,30 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
707} 774}
708 775
709static bool 776static bool
777intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
778 int target, int refclk, intel_clock_t *best_clock)
779{
780 struct drm_device *dev = crtc->dev;
781 intel_clock_t clock;
782 if (target < 200000) {
783 clock.n = 1;
784 clock.p1 = 2;
785 clock.p2 = 10;
786 clock.m1 = 12;
787 clock.m2 = 9;
788 } else {
789 clock.n = 2;
790 clock.p1 = 1;
791 clock.p2 = 10;
792 clock.m1 = 14;
793 clock.m2 = 8;
794 }
795 intel_clock(dev, refclk, &clock);
796 memcpy(best_clock, &clock, sizeof(intel_clock_t));
797 return true;
798}
799
800static bool
710intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 801intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
711 int target, int refclk, intel_clock_t *best_clock) 802 int target, int refclk, intel_clock_t *best_clock)
712{ 803{
@@ -718,6 +809,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
718 int err_most = 47; 809 int err_most = 47;
719 found = false; 810 found = false;
720 811
 812 /* eDP has only two clock choices, no n/m/p setting */
813 if (HAS_eDP)
814 return true;
815
816 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
817 return intel_find_pll_igdng_dp(limit, crtc, target,
818 refclk, best_clock);
819
721 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 820 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
722 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 821 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
723 LVDS_CLKB_POWER_UP) 822 LVDS_CLKB_POWER_UP)
@@ -764,6 +863,32 @@ out:
764 return found; 863 return found;
765} 864}
766 865
866/* DisplayPort has only two frequencies, 162MHz and 270MHz */
867static bool
868intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
869 int target, int refclk, intel_clock_t *best_clock)
870{
871 intel_clock_t clock;
872 if (target < 200000) {
873 clock.p1 = 2;
874 clock.p2 = 10;
875 clock.n = 2;
876 clock.m1 = 23;
877 clock.m2 = 8;
878 } else {
879 clock.p1 = 1;
880 clock.p2 = 10;
881 clock.n = 1;
882 clock.m1 = 14;
883 clock.m2 = 2;
884 }
885 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
886 clock.p = (clock.p1 * clock.p2);
887 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
888 memcpy(best_clock, &clock, sizeof(intel_clock_t));
889 return true;
890}
891
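
Editor's note: as a sanity check on the fixed divisor sets above, plugging them into the dot-clock expression in the function reproduces the two DisplayPort link rates (96000 is presumably the 96 MHz reference clock expressed in kHz):

	/* low-rate set: p1=2, p2=10, n=2, m1=23, m2=8 */
	m   = 5 * (23 + 2) + (8 + 2);		/* = 135 */
	p   = 2 * 10;				/* = 20  */
	dot = 96000 * 135 / (2 + 2) / 20;	/* = 162000 kHz = 162 MHz exactly */

	/* high-rate set: p1=1, p2=10, n=1, m1=14, m2=2 */
	m   = 5 * (14 + 2) + (2 + 2);		/* = 84  */
	p   = 1 * 10;				/* = 10  */
	dot = 96000 * 84 / (1 + 2) / 10;	/* = 268800 kHz, i.e. the nominal 270 MHz link */
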
767void 892void
768intel_wait_for_vblank(struct drm_device *dev) 893intel_wait_for_vblank(struct drm_device *dev)
769{ 894{
@@ -927,13 +1052,97 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
927 return 0; 1052 return 0;
928} 1053}
929 1054
1055/* Disable the VGA plane that we never use */
1056static void i915_disable_vga (struct drm_device *dev)
1057{
1058 struct drm_i915_private *dev_priv = dev->dev_private;
1059 u8 sr1;
1060 u32 vga_reg;
1061
1062 if (IS_IGDNG(dev))
1063 vga_reg = CPU_VGACNTRL;
1064 else
1065 vga_reg = VGACNTRL;
1066
1067 if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
1068 return;
1069
1070 I915_WRITE8(VGA_SR_INDEX, 1);
1071 sr1 = I915_READ8(VGA_SR_DATA);
1072 I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
1073 udelay(100);
1074
1075 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
1076}
1077
1078static void igdng_disable_pll_edp (struct drm_crtc *crtc)
1079{
1080 struct drm_device *dev = crtc->dev;
1081 struct drm_i915_private *dev_priv = dev->dev_private;
1082 u32 dpa_ctl;
1083
1084 DRM_DEBUG("\n");
1085 dpa_ctl = I915_READ(DP_A);
1086 dpa_ctl &= ~DP_PLL_ENABLE;
1087 I915_WRITE(DP_A, dpa_ctl);
1088}
1089
1090static void igdng_enable_pll_edp (struct drm_crtc *crtc)
1091{
1092 struct drm_device *dev = crtc->dev;
1093 struct drm_i915_private *dev_priv = dev->dev_private;
1094 u32 dpa_ctl;
1095
1096 dpa_ctl = I915_READ(DP_A);
1097 dpa_ctl |= DP_PLL_ENABLE;
1098 I915_WRITE(DP_A, dpa_ctl);
1099 udelay(200);
1100}
1101
1102
1103static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
1104{
1105 struct drm_device *dev = crtc->dev;
1106 struct drm_i915_private *dev_priv = dev->dev_private;
1107 u32 dpa_ctl;
1108
1109 DRM_DEBUG("eDP PLL enable for clock %d\n", clock);
1110 dpa_ctl = I915_READ(DP_A);
1111 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1112
1113 if (clock < 200000) {
1114 u32 temp;
1115 dpa_ctl |= DP_PLL_FREQ_160MHZ;
 1116 /* workaround for 160MHz:
1117 1) program 0x4600c bits 15:0 = 0x8124
1118 2) program 0x46010 bit 0 = 1
1119 3) program 0x46034 bit 24 = 1
1120 4) program 0x64000 bit 14 = 1
1121 */
1122 temp = I915_READ(0x4600c);
1123 temp &= 0xffff0000;
1124 I915_WRITE(0x4600c, temp | 0x8124);
1125
1126 temp = I915_READ(0x46010);
1127 I915_WRITE(0x46010, temp | 1);
1128
1129 temp = I915_READ(0x46034);
1130 I915_WRITE(0x46034, temp | (1 << 24));
1131 } else {
1132 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1133 }
1134 I915_WRITE(DP_A, dpa_ctl);
1135
1136 udelay(500);
1137}
1138
930static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) 1139static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
931{ 1140{
932 struct drm_device *dev = crtc->dev; 1141 struct drm_device *dev = crtc->dev;
933 struct drm_i915_private *dev_priv = dev->dev_private; 1142 struct drm_i915_private *dev_priv = dev->dev_private;
934 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1143 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
935 int pipe = intel_crtc->pipe; 1144 int pipe = intel_crtc->pipe;
936 int plane = intel_crtc->pipe; 1145 int plane = intel_crtc->plane;
937 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; 1146 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
938 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 1147 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
939 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; 1148 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
@@ -944,6 +1153,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
944 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; 1153 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
945 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1154 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
946 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; 1155 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1156 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
947 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 1157 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
948 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 1158 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
949 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; 1159 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -957,7 +1167,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
957 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; 1167 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
958 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; 1168 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
959 u32 temp; 1169 u32 temp;
960 int tries = 5, j; 1170 int tries = 5, j, n;
961 1171
962 /* XXX: When our outputs are all unaware of DPMS modes other than off 1172 /* XXX: When our outputs are all unaware of DPMS modes other than off
963 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1173 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -967,27 +1177,32 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
967 case DRM_MODE_DPMS_STANDBY: 1177 case DRM_MODE_DPMS_STANDBY:
968 case DRM_MODE_DPMS_SUSPEND: 1178 case DRM_MODE_DPMS_SUSPEND:
969 DRM_DEBUG("crtc %d dpms on\n", pipe); 1179 DRM_DEBUG("crtc %d dpms on\n", pipe);
970 /* enable PCH DPLL */ 1180 if (HAS_eDP) {
971 temp = I915_READ(pch_dpll_reg); 1181 /* enable eDP PLL */
972 if ((temp & DPLL_VCO_ENABLE) == 0) { 1182 igdng_enable_pll_edp(crtc);
973 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); 1183 } else {
974 I915_READ(pch_dpll_reg); 1184 /* enable PCH DPLL */
975 } 1185 temp = I915_READ(pch_dpll_reg);
976 1186 if ((temp & DPLL_VCO_ENABLE) == 0) {
977 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1187 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
978 temp = I915_READ(fdi_rx_reg); 1188 I915_READ(pch_dpll_reg);
979 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1189 }
980 FDI_SEL_PCDCLK |
981 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
982 I915_READ(fdi_rx_reg);
983 udelay(200);
984 1190
985 /* Enable CPU FDI TX PLL, always on for IGDNG */ 1191 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
986 temp = I915_READ(fdi_tx_reg); 1192 temp = I915_READ(fdi_rx_reg);
987 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 1193 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
988 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); 1194 FDI_SEL_PCDCLK |
989 I915_READ(fdi_tx_reg); 1195 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
990 udelay(100); 1196 I915_READ(fdi_rx_reg);
1197 udelay(200);
1198
1199 /* Enable CPU FDI TX PLL, always on for IGDNG */
1200 temp = I915_READ(fdi_tx_reg);
1201 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1202 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
1203 I915_READ(fdi_tx_reg);
1204 udelay(100);
1205 }
991 } 1206 }
992 1207
993 /* Enable CPU pipe */ 1208 /* Enable CPU pipe */
@@ -1006,122 +1221,126 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1006 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); 1221 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1007 } 1222 }
1008 1223
1009 /* enable CPU FDI TX and PCH FDI RX */ 1224 if (!HAS_eDP) {
1010 temp = I915_READ(fdi_tx_reg); 1225 /* enable CPU FDI TX and PCH FDI RX */
1011 temp |= FDI_TX_ENABLE; 1226 temp = I915_READ(fdi_tx_reg);
1012 temp |= FDI_DP_PORT_WIDTH_X4; /* default */ 1227 temp |= FDI_TX_ENABLE;
1013 temp &= ~FDI_LINK_TRAIN_NONE; 1228 temp |= FDI_DP_PORT_WIDTH_X4; /* default */
1014 temp |= FDI_LINK_TRAIN_PATTERN_1; 1229 temp &= ~FDI_LINK_TRAIN_NONE;
1015 I915_WRITE(fdi_tx_reg, temp); 1230 temp |= FDI_LINK_TRAIN_PATTERN_1;
1016 I915_READ(fdi_tx_reg); 1231 I915_WRITE(fdi_tx_reg, temp);
1232 I915_READ(fdi_tx_reg);
1017 1233
1018 temp = I915_READ(fdi_rx_reg); 1234 temp = I915_READ(fdi_rx_reg);
1019 temp &= ~FDI_LINK_TRAIN_NONE; 1235 temp &= ~FDI_LINK_TRAIN_NONE;
1020 temp |= FDI_LINK_TRAIN_PATTERN_1; 1236 temp |= FDI_LINK_TRAIN_PATTERN_1;
1021 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); 1237 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1022 I915_READ(fdi_rx_reg); 1238 I915_READ(fdi_rx_reg);
1023 1239
1024 udelay(150); 1240 udelay(150);
1025 1241
1026 /* Train FDI. */ 1242 /* Train FDI. */
 1027 /* unmask FDI RX Interrupt symbol_lock and bit_lock bit 1243 /* unmask FDI RX Interrupt symbol_lock and bit_lock bit
1028 for train result */ 1244 for train result */
1029 temp = I915_READ(fdi_rx_imr_reg); 1245 temp = I915_READ(fdi_rx_imr_reg);
1030 temp &= ~FDI_RX_SYMBOL_LOCK; 1246 temp &= ~FDI_RX_SYMBOL_LOCK;
1031 temp &= ~FDI_RX_BIT_LOCK; 1247 temp &= ~FDI_RX_BIT_LOCK;
1032 I915_WRITE(fdi_rx_imr_reg, temp); 1248 I915_WRITE(fdi_rx_imr_reg, temp);
1033 I915_READ(fdi_rx_imr_reg); 1249 I915_READ(fdi_rx_imr_reg);
1034 udelay(150); 1250 udelay(150);
1035 1251
1036 temp = I915_READ(fdi_rx_iir_reg); 1252 temp = I915_READ(fdi_rx_iir_reg);
1037 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1253 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1038 1254
1039 if ((temp & FDI_RX_BIT_LOCK) == 0) { 1255 if ((temp & FDI_RX_BIT_LOCK) == 0) {
1040 for (j = 0; j < tries; j++) { 1256 for (j = 0; j < tries; j++) {
1041 temp = I915_READ(fdi_rx_iir_reg); 1257 temp = I915_READ(fdi_rx_iir_reg);
1042 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1258 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1043 if (temp & FDI_RX_BIT_LOCK) 1259 if (temp & FDI_RX_BIT_LOCK)
1044 break; 1260 break;
1045 udelay(200); 1261 udelay(200);
1046 } 1262 }
1047 if (j != tries) 1263 if (j != tries)
1264 I915_WRITE(fdi_rx_iir_reg,
1265 temp | FDI_RX_BIT_LOCK);
1266 else
1267 DRM_DEBUG("train 1 fail\n");
1268 } else {
1048 I915_WRITE(fdi_rx_iir_reg, 1269 I915_WRITE(fdi_rx_iir_reg,
1049 temp | FDI_RX_BIT_LOCK); 1270 temp | FDI_RX_BIT_LOCK);
1050 else 1271 DRM_DEBUG("train 1 ok 2!\n");
1051 DRM_DEBUG("train 1 fail\n"); 1272 }
1052 } else { 1273 temp = I915_READ(fdi_tx_reg);
1053 I915_WRITE(fdi_rx_iir_reg, 1274 temp &= ~FDI_LINK_TRAIN_NONE;
1054 temp | FDI_RX_BIT_LOCK); 1275 temp |= FDI_LINK_TRAIN_PATTERN_2;
1055 DRM_DEBUG("train 1 ok 2!\n"); 1276 I915_WRITE(fdi_tx_reg, temp);
1056 } 1277
1057 temp = I915_READ(fdi_tx_reg); 1278 temp = I915_READ(fdi_rx_reg);
1058 temp &= ~FDI_LINK_TRAIN_NONE; 1279 temp &= ~FDI_LINK_TRAIN_NONE;
1059 temp |= FDI_LINK_TRAIN_PATTERN_2; 1280 temp |= FDI_LINK_TRAIN_PATTERN_2;
1060 I915_WRITE(fdi_tx_reg, temp); 1281 I915_WRITE(fdi_rx_reg, temp);
1061
1062 temp = I915_READ(fdi_rx_reg);
1063 temp &= ~FDI_LINK_TRAIN_NONE;
1064 temp |= FDI_LINK_TRAIN_PATTERN_2;
1065 I915_WRITE(fdi_rx_reg, temp);
1066 1282
1067 udelay(150); 1283 udelay(150);
1068 1284
1069 temp = I915_READ(fdi_rx_iir_reg); 1285 temp = I915_READ(fdi_rx_iir_reg);
1070 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1286 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1071 1287
1072 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { 1288 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
1073 for (j = 0; j < tries; j++) { 1289 for (j = 0; j < tries; j++) {
1074 temp = I915_READ(fdi_rx_iir_reg); 1290 temp = I915_READ(fdi_rx_iir_reg);
1075 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1291 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1076 if (temp & FDI_RX_SYMBOL_LOCK) 1292 if (temp & FDI_RX_SYMBOL_LOCK)
1077 break; 1293 break;
1078 udelay(200); 1294 udelay(200);
1079 } 1295 }
1080 if (j != tries) { 1296 if (j != tries) {
1297 I915_WRITE(fdi_rx_iir_reg,
1298 temp | FDI_RX_SYMBOL_LOCK);
1299 DRM_DEBUG("train 2 ok 1!\n");
1300 } else
1301 DRM_DEBUG("train 2 fail\n");
1302 } else {
1081 I915_WRITE(fdi_rx_iir_reg, 1303 I915_WRITE(fdi_rx_iir_reg,
1082 temp | FDI_RX_SYMBOL_LOCK); 1304 temp | FDI_RX_SYMBOL_LOCK);
1083 DRM_DEBUG("train 2 ok 1!\n"); 1305 DRM_DEBUG("train 2 ok 2!\n");
1084 } else 1306 }
1085 DRM_DEBUG("train 2 fail\n"); 1307 DRM_DEBUG("train done\n");
1086 } else {
1087 I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK);
1088 DRM_DEBUG("train 2 ok 2!\n");
1089 }
1090 DRM_DEBUG("train done\n");
1091 1308
1092 /* set transcoder timing */ 1309 /* set transcoder timing */
1093 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); 1310 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
1094 I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); 1311 I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
1095 I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); 1312 I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
1096 1313
1097 I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); 1314 I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
1098 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); 1315 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
1099 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); 1316 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
1100 1317
1101 /* enable PCH transcoder */ 1318 /* enable PCH transcoder */
1102 temp = I915_READ(transconf_reg); 1319 temp = I915_READ(transconf_reg);
1103 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 1320 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1104 I915_READ(transconf_reg); 1321 I915_READ(transconf_reg);
1105 1322
1106 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) 1323 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
1107 ; 1324 ;
1108 1325
1109 /* enable normal */ 1326 /* enable normal */
1110 1327
1111 temp = I915_READ(fdi_tx_reg); 1328 temp = I915_READ(fdi_tx_reg);
1112 temp &= ~FDI_LINK_TRAIN_NONE; 1329 temp &= ~FDI_LINK_TRAIN_NONE;
1113 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | 1330 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1114 FDI_TX_ENHANCE_FRAME_ENABLE); 1331 FDI_TX_ENHANCE_FRAME_ENABLE);
1115 I915_READ(fdi_tx_reg); 1332 I915_READ(fdi_tx_reg);
1116 1333
1117 temp = I915_READ(fdi_rx_reg); 1334 temp = I915_READ(fdi_rx_reg);
1118 temp &= ~FDI_LINK_TRAIN_NONE; 1335 temp &= ~FDI_LINK_TRAIN_NONE;
1119 I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | 1336 I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
1120 FDI_RX_ENHANCE_FRAME_ENABLE); 1337 FDI_RX_ENHANCE_FRAME_ENABLE);
1121 I915_READ(fdi_rx_reg); 1338 I915_READ(fdi_rx_reg);
1122 1339
1123 /* wait one idle pattern time */ 1340 /* wait one idle pattern time */
1124 udelay(100); 1341 udelay(100);
1342
1343 }
1125 1344
1126 intel_crtc_load_lut(crtc); 1345 intel_crtc_load_lut(crtc);
1127 1346
@@ -1129,8 +1348,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1129 case DRM_MODE_DPMS_OFF: 1348 case DRM_MODE_DPMS_OFF:
1130 DRM_DEBUG("crtc %d dpms off\n", pipe); 1349 DRM_DEBUG("crtc %d dpms off\n", pipe);
1131 1350
1132 /* Disable the VGA plane that we never use */ 1351 i915_disable_vga(dev);
1133 I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE);
1134 1352
1135 /* Disable display plane */ 1353 /* Disable display plane */
1136 temp = I915_READ(dspcntr_reg); 1354 temp = I915_READ(dspcntr_reg);
@@ -1146,17 +1364,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1146 if ((temp & PIPEACONF_ENABLE) != 0) { 1364 if ((temp & PIPEACONF_ENABLE) != 0) {
1147 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); 1365 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
1148 I915_READ(pipeconf_reg); 1366 I915_READ(pipeconf_reg);
1367 n = 0;
1149 /* wait for cpu pipe off, pipe state */ 1368 /* wait for cpu pipe off, pipe state */
1150 while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) 1369 while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
1151 ; 1370 n++;
1371 if (n < 60) {
1372 udelay(500);
1373 continue;
1374 } else {
1375 DRM_DEBUG("pipe %d off delay\n", pipe);
1376 break;
1377 }
1378 }
1152 } else 1379 } else
1153 DRM_DEBUG("crtc %d is disabled\n", pipe); 1380 DRM_DEBUG("crtc %d is disabled\n", pipe);
1154 1381
1155 /* IGDNG-A : disable cpu panel fitter ? */ 1382 if (HAS_eDP) {
1156 temp = I915_READ(pf_ctl_reg); 1383 igdng_disable_pll_edp(crtc);
1157 if ((temp & PF_ENABLE) != 0) {
1158 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1159 I915_READ(pf_ctl_reg);
1160 } 1384 }
1161 1385
1162 /* disable CPU FDI tx and PCH FDI rx */ 1386 /* disable CPU FDI tx and PCH FDI rx */
@@ -1168,6 +1392,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1168 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); 1392 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1169 I915_READ(fdi_rx_reg); 1393 I915_READ(fdi_rx_reg);
1170 1394
1395 udelay(100);
1396
1171 /* still set train pattern 1 */ 1397 /* still set train pattern 1 */
1172 temp = I915_READ(fdi_tx_reg); 1398 temp = I915_READ(fdi_tx_reg);
1173 temp &= ~FDI_LINK_TRAIN_NONE; 1399 temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1179,14 +1405,25 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1179 temp |= FDI_LINK_TRAIN_PATTERN_1; 1405 temp |= FDI_LINK_TRAIN_PATTERN_1;
1180 I915_WRITE(fdi_rx_reg, temp); 1406 I915_WRITE(fdi_rx_reg, temp);
1181 1407
1408 udelay(100);
1409
1182 /* disable PCH transcoder */ 1410 /* disable PCH transcoder */
1183 temp = I915_READ(transconf_reg); 1411 temp = I915_READ(transconf_reg);
1184 if ((temp & TRANS_ENABLE) != 0) { 1412 if ((temp & TRANS_ENABLE) != 0) {
1185 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); 1413 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
1186 I915_READ(transconf_reg); 1414 I915_READ(transconf_reg);
1415 n = 0;
1187 /* wait for PCH transcoder off, transcoder state */ 1416 /* wait for PCH transcoder off, transcoder state */
1188 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) 1417 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
1189 ; 1418 n++;
1419 if (n < 60) {
1420 udelay(500);
1421 continue;
1422 } else {
1423 DRM_DEBUG("transcoder %d off delay\n", pipe);
1424 break;
1425 }
1426 }
1190 } 1427 }
1191 1428
1192 /* disable PCH DPLL */ 1429 /* disable PCH DPLL */
@@ -1204,6 +1441,22 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1204 I915_READ(fdi_rx_reg); 1441 I915_READ(fdi_rx_reg);
1205 } 1442 }
1206 1443
1444 /* Disable CPU FDI TX PLL */
1445 temp = I915_READ(fdi_tx_reg);
1446 if ((temp & FDI_TX_PLL_ENABLE) != 0) {
1447 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
1448 I915_READ(fdi_tx_reg);
1449 udelay(100);
1450 }
1451
1452 /* Disable PF */
1453 temp = I915_READ(pf_ctl_reg);
1454 if ((temp & PF_ENABLE) != 0) {
1455 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1456 I915_READ(pf_ctl_reg);
1457 }
1458 I915_WRITE(pf_win_size, 0);
1459
1207 /* Wait for the clocks to turn off. */ 1460 /* Wait for the clocks to turn off. */
1208 udelay(150); 1461 udelay(150);
1209 break; 1462 break;
@@ -1263,13 +1516,15 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1263 1516
1264 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1517 /* Give the overlay scaler a chance to enable if it's on this pipe */
1265 //intel_crtc_dpms_video(crtc, true); TODO 1518 //intel_crtc_dpms_video(crtc, true); TODO
1519 intel_update_watermarks(dev);
1266 break; 1520 break;
1267 case DRM_MODE_DPMS_OFF: 1521 case DRM_MODE_DPMS_OFF:
1522 intel_update_watermarks(dev);
1268 /* Give the overlay scaler a chance to disable if it's on this pipe */ 1523 /* Give the overlay scaler a chance to disable if it's on this pipe */
1269 //intel_crtc_dpms_video(crtc, FALSE); TODO 1524 //intel_crtc_dpms_video(crtc, FALSE); TODO
1270 1525
1271 /* Disable the VGA plane that we never use */ 1526 /* Disable the VGA plane that we never use */
1272 I915_WRITE(VGACNTRL, VGA_DISP_DISABLE); 1527 i915_disable_vga(dev);
1273 1528
1274 /* Disable display plane */ 1529 /* Disable display plane */
1275 temp = I915_READ(dspcntr_reg); 1530 temp = I915_READ(dspcntr_reg);
@@ -1443,7 +1698,6 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
1443 return 0; /* Silence gcc warning */ 1698 return 0; /* Silence gcc warning */
1444} 1699}
1445 1700
1446
1447/** 1701/**
1448 * Return the pipe currently connected to the panel fitter, 1702 * Return the pipe currently connected to the panel fitter,
1449 * or -1 if the panel fitter is not present or not in use 1703 * or -1 if the panel fitter is not present or not in use
@@ -1502,7 +1756,7 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
1502 1756
1503 temp = (u64) DATA_N * pixel_clock; 1757 temp = (u64) DATA_N * pixel_clock;
1504 temp = div_u64(temp, link_clock); 1758 temp = div_u64(temp, link_clock);
1505 m_n->gmch_m = (temp * bytes_per_pixel) / nlanes; 1759 m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes);
1506 m_n->gmch_n = DATA_N; 1760 m_n->gmch_n = DATA_N;
1507 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 1761 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
1508 1762
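
Editor's note on the div_u64() change in this hunk: `temp * bytes_per_pixel` is a u64, and a plain `/` on a 64-bit dividend compiles into a libgcc helper call (`__udivdi3`) on 32-bit architectures, which the kernel does not provide. div_u64() from <linux/math64.h>, whose prototype is effectively `u64 div_u64(u64 dividend, u32 divisor)`, is the standard way to divide a 64-bit value by a 32-bit divisor in kernel code, so the new line keeps the whole M/N computation in 64 bits without the link-time dependency.
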
@@ -1513,6 +1767,464 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
1513} 1767}
1514 1768
1515 1769
1770struct intel_watermark_params {
1771 unsigned long fifo_size;
1772 unsigned long max_wm;
1773 unsigned long default_wm;
1774 unsigned long guard_size;
1775 unsigned long cacheline_size;
1776};
1777
1778/* IGD has different values for various configs */
1779static struct intel_watermark_params igd_display_wm = {
1780 IGD_DISPLAY_FIFO,
1781 IGD_MAX_WM,
1782 IGD_DFT_WM,
1783 IGD_GUARD_WM,
1784 IGD_FIFO_LINE_SIZE
1785};
1786static struct intel_watermark_params igd_display_hplloff_wm = {
1787 IGD_DISPLAY_FIFO,
1788 IGD_MAX_WM,
1789 IGD_DFT_HPLLOFF_WM,
1790 IGD_GUARD_WM,
1791 IGD_FIFO_LINE_SIZE
1792};
1793static struct intel_watermark_params igd_cursor_wm = {
1794 IGD_CURSOR_FIFO,
1795 IGD_CURSOR_MAX_WM,
1796 IGD_CURSOR_DFT_WM,
1797 IGD_CURSOR_GUARD_WM,
1798 IGD_FIFO_LINE_SIZE,
1799};
1800static struct intel_watermark_params igd_cursor_hplloff_wm = {
1801 IGD_CURSOR_FIFO,
1802 IGD_CURSOR_MAX_WM,
1803 IGD_CURSOR_DFT_WM,
1804 IGD_CURSOR_GUARD_WM,
1805 IGD_FIFO_LINE_SIZE
1806};
1807static struct intel_watermark_params i945_wm_info = {
1808 I945_FIFO_SIZE,
1809 I915_MAX_WM,
1810 1,
1811 2,
1812 I915_FIFO_LINE_SIZE
1813};
1814static struct intel_watermark_params i915_wm_info = {
1815 I915_FIFO_SIZE,
1816 I915_MAX_WM,
1817 1,
1818 2,
1819 I915_FIFO_LINE_SIZE
1820};
1821static struct intel_watermark_params i855_wm_info = {
1822 I855GM_FIFO_SIZE,
1823 I915_MAX_WM,
1824 1,
1825 2,
1826 I830_FIFO_LINE_SIZE
1827};
1828static struct intel_watermark_params i830_wm_info = {
1829 I830_FIFO_SIZE,
1830 I915_MAX_WM,
1831 1,
1832 2,
1833 I830_FIFO_LINE_SIZE
1834};
1835
1836/**
1837 * intel_calculate_wm - calculate watermark level
1838 * @clock_in_khz: pixel clock
1839 * @wm: chip FIFO params
1840 * @pixel_size: display pixel size
1841 * @latency_ns: memory latency for the platform
1842 *
1843 * Calculate the watermark level (the level at which the display plane will
1844 * start fetching from memory again). Each chip has a different display
1845 * FIFO size and allocation, so the caller needs to figure that out and pass
1846 * in the correct intel_watermark_params structure.
1847 *
1848 * As the pixel clock runs, the FIFO will be drained at a rate that depends
1849 * on the pixel size. When it reaches the watermark level, it'll start
 1850 * fetching FIFO line-sized chunks from memory until the FIFO fills
1851 * past the watermark point. If the FIFO drains completely, a FIFO underrun
1852 * will occur, and a display engine hang could result.
1853 */
1854static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1855 struct intel_watermark_params *wm,
1856 int pixel_size,
1857 unsigned long latency_ns)
1858{
1859 long entries_required, wm_size;
1860
1861 entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000;
1862 entries_required /= wm->cacheline_size;
1863
1864 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
1865
1866 wm_size = wm->fifo_size - (entries_required + wm->guard_size);
1867
1868 DRM_DEBUG("FIFO watermark level: %d\n", wm_size);
1869
1870 /* Don't promote wm_size to unsigned... */
1871 if (wm_size > (long)wm->max_wm)
1872 wm_size = wm->max_wm;
1873 if (wm_size <= 0)
1874 wm_size = wm->default_wm;
1875 return wm_size;
1876}
1877
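
Editor's note: to make the watermark formula concrete, here is a worked example with illustrative numbers only (the real FIFO size, guard and line size come from the intel_watermark_params tables below, whose chip constants are defined elsewhere in the driver). Assume a 65,000 kHz dot clock, 4 bytes per pixel, the 3,000 ns default latency, 64-byte FIFO lines, a 96-entry FIFO and a guard of 2:

	long entries = (65000L * 4 * 3000) / 1000000;	/* 780 bytes drained during the latency window */
	entries /= 64;					/* 12 FIFO lines */
	long wm = 96 - (entries + 2);			/* watermark level: 82 lines, clamped to max_wm */
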
1878struct cxsr_latency {
1879 int is_desktop;
1880 unsigned long fsb_freq;
1881 unsigned long mem_freq;
1882 unsigned long display_sr;
1883 unsigned long display_hpll_disable;
1884 unsigned long cursor_sr;
1885 unsigned long cursor_hpll_disable;
1886};
1887
1888static struct cxsr_latency cxsr_latency_table[] = {
1889 {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
1890 {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
1891 {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
1892
1893 {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
1894 {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
1895 {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
1896
1897 {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
1898 {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
1899 {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
1900
1901 {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
1902 {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
1903 {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
1904
1905 {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
1906 {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
1907 {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
1908
1909 {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
1910 {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
1911 {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
1912};
1913
1914static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
1915 int mem)
1916{
1917 int i;
1918 struct cxsr_latency *latency;
1919
1920 if (fsb == 0 || mem == 0)
1921 return NULL;
1922
1923 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
1924 latency = &cxsr_latency_table[i];
1925 if (is_desktop == latency->is_desktop &&
1926 fsb == latency->fsb_freq && mem == latency->mem_freq)
1927 break;
1928 }
1929 if (i >= ARRAY_SIZE(cxsr_latency_table)) {
1930 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
1931 return NULL;
1932 }
1933 return latency;
1934}
1935
1936static void igd_disable_cxsr(struct drm_device *dev)
1937{
1938 struct drm_i915_private *dev_priv = dev->dev_private;
1939 u32 reg;
1940
1941 /* deactivate cxsr */
1942 reg = I915_READ(DSPFW3);
1943 reg &= ~(IGD_SELF_REFRESH_EN);
1944 I915_WRITE(DSPFW3, reg);
1945 DRM_INFO("Big FIFO is disabled\n");
1946}
1947
1948static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
1949 int pixel_size)
1950{
1951 struct drm_i915_private *dev_priv = dev->dev_private;
1952 u32 reg;
1953 unsigned long wm;
1954 struct cxsr_latency *latency;
1955
1956 latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq,
1957 dev_priv->mem_freq);
1958 if (!latency) {
1959 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
1960 igd_disable_cxsr(dev);
1961 return;
1962 }
1963
1964 /* Display SR */
1965 wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size,
1966 latency->display_sr);
1967 reg = I915_READ(DSPFW1);
1968 reg &= 0x7fffff;
1969 reg |= wm << 23;
1970 I915_WRITE(DSPFW1, reg);
1971 DRM_DEBUG("DSPFW1 register is %x\n", reg);
1972
1973 /* cursor SR */
1974 wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size,
1975 latency->cursor_sr);
1976 reg = I915_READ(DSPFW3);
1977 reg &= ~(0x3f << 24);
1978 reg |= (wm & 0x3f) << 24;
1979 I915_WRITE(DSPFW3, reg);
1980
1981 /* Display HPLL off SR */
1982 wm = intel_calculate_wm(clock, &igd_display_hplloff_wm,
 1983 pixel_size, latency->display_hpll_disable);
1984 reg = I915_READ(DSPFW3);
1985 reg &= 0xfffffe00;
1986 reg |= wm & 0x1ff;
1987 I915_WRITE(DSPFW3, reg);
1988
1989 /* cursor HPLL off SR */
1990 wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size,
1991 latency->cursor_hpll_disable);
1992 reg = I915_READ(DSPFW3);
1993 reg &= ~(0x3f << 16);
1994 reg |= (wm & 0x3f) << 16;
1995 I915_WRITE(DSPFW3, reg);
1996 DRM_DEBUG("DSPFW3 register is %x\n", reg);
1997
1998 /* activate cxsr */
1999 reg = I915_READ(DSPFW3);
2000 reg |= IGD_SELF_REFRESH_EN;
2001 I915_WRITE(DSPFW3, reg);
2002
2003 DRM_INFO("Big FIFO is enabled\n");
2004
2005 return;
2006}
2007
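
Editor's note: for readers decoding the register writes in igd_enable_cxsr() above, the masks imply the following DSPFW field layout (inferred from this hunk, not from documentation): DSPFW1 bits 31:23 hold the display self-refresh watermark; DSPFW3 bits 29:24 the cursor SR watermark, bits 8:0 the display HPLL-off watermark and bits 21:16 the cursor HPLL-off watermark; IGD_SELF_REFRESH_EN in DSPFW3 gates the whole CxSR ("big FIFO") mode.
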
 2008static const int latency_ns = 3000; /* default for non-igd platforms */
2009
2010static int intel_get_fifo_size(struct drm_device *dev, int plane)
2011{
2012 struct drm_i915_private *dev_priv = dev->dev_private;
2013 uint32_t dsparb = I915_READ(DSPARB);
2014 int size;
2015
2016 if (IS_I9XX(dev)) {
2017 if (plane == 0)
2018 size = dsparb & 0x7f;
2019 else
2020 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2021 (dsparb & 0x7f);
2022 } else if (IS_I85X(dev)) {
2023 if (plane == 0)
2024 size = dsparb & 0x1ff;
2025 else
2026 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2027 (dsparb & 0x1ff);
2028 size >>= 1; /* Convert to cachelines */
2029 } else if (IS_845G(dev)) {
2030 size = dsparb & 0x7f;
2031 size >>= 2; /* Convert to cachelines */
2032 } else {
2033 size = dsparb & 0x7f;
2034 size >>= 1; /* Convert to cachelines */
2035 }
2036
2037 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
2038 size);
2039
2040 return size;
2041}
2042
2043static void i965_update_wm(struct drm_device *dev)
2044{
2045 struct drm_i915_private *dev_priv = dev->dev_private;
2046
2047 DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
2048
2049 /* 965 has limitations... */
2050 I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
2051 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
2052}
2053
2054static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2055 int planeb_clock, int sr_hdisplay, int pixel_size)
2056{
2057 struct drm_i915_private *dev_priv = dev->dev_private;
2058 uint32_t fwater_lo;
2059 uint32_t fwater_hi;
2060 int total_size, cacheline_size, cwm, srwm = 1;
2061 int planea_wm, planeb_wm;
2062 struct intel_watermark_params planea_params, planeb_params;
2063 unsigned long line_time_us;
2064 int sr_clock, sr_entries = 0;
2065
2066 /* Create copies of the base settings for each pipe */
2067 if (IS_I965GM(dev) || IS_I945GM(dev))
2068 planea_params = planeb_params = i945_wm_info;
2069 else if (IS_I9XX(dev))
2070 planea_params = planeb_params = i915_wm_info;
2071 else
2072 planea_params = planeb_params = i855_wm_info;
2073
2074 /* Grab a couple of global values before we overwrite them */
2075 total_size = planea_params.fifo_size;
2076 cacheline_size = planea_params.cacheline_size;
2077
2078 /* Update per-plane FIFO sizes */
2079 planea_params.fifo_size = intel_get_fifo_size(dev, 0);
2080 planeb_params.fifo_size = intel_get_fifo_size(dev, 1);
2081
2082 planea_wm = intel_calculate_wm(planea_clock, &planea_params,
2083 pixel_size, latency_ns);
2084 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
2085 pixel_size, latency_ns);
2086 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2087
2088 /*
2089 * Overlay gets an aggressive default since video jitter is bad.
2090 */
2091 cwm = 2;
2092
2093 /* Calc sr entries for one plane configs */
2094 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2095 /* self-refresh has much higher latency */
 2096 static const int sr_latency_ns = 6000;
2097
2098 sr_clock = planea_clock ? planea_clock : planeb_clock;
2099 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2100
2101 /* Use ns/us then divide to preserve precision */
2102 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2103 pixel_size * sr_hdisplay) / 1000;
2104 sr_entries = roundup(sr_entries / cacheline_size, 1);
2105 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2106 srwm = total_size - sr_entries;
2107 if (srwm < 0)
2108 srwm = 1;
2109 if (IS_I9XX(dev))
2110 I915_WRITE(FW_BLC_SELF, (srwm & 0x3f));
2111 }
2112
2113 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2114 planea_wm, planeb_wm, cwm, srwm);
2115
2116 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2117 fwater_hi = (cwm & 0x1f);
2118
2119 /* Set request length to 8 cachelines per fetch */
2120 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2121 fwater_hi = fwater_hi | (1 << 8);
2122
2123 I915_WRITE(FW_BLC, fwater_lo);
2124 I915_WRITE(FW_BLC2, fwater_hi);
2125}
2126
2127static void i830_update_wm(struct drm_device *dev, int planea_clock,
2128 int pixel_size)
2129{
2130 struct drm_i915_private *dev_priv = dev->dev_private;
2131 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2132 int planea_wm;
2133
2134 i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0);
2135
2136 planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
2137 pixel_size, latency_ns);
2138 fwater_lo |= (3<<8) | planea_wm;
2139
2140 DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm);
2141
2142 I915_WRITE(FW_BLC, fwater_lo);
2143}
2144
2145/**
2146 * intel_update_watermarks - update FIFO watermark values based on current modes
2147 *
2148 * Calculate watermark values for the various WM regs based on current mode
2149 * and plane configuration.
2150 *
2151 * There are several cases to deal with here:
2152 * - normal (i.e. non-self-refresh)
2153 * - self-refresh (SR) mode
2154 * - lines are large relative to FIFO size (buffer can hold up to 2)
2155 * - lines are small relative to FIFO size (buffer can hold more than 2
2156 * lines), so need to account for TLB latency
2157 *
2158 * The normal calculation is:
2159 * watermark = dotclock * bytes per pixel * latency
2160 * where latency is platform & configuration dependent (we assume pessimal
2161 * values here).
2162 *
2163 * The SR calculation is:
2164 * watermark = (trunc(latency/line time)+1) * surface width *
2165 * bytes per pixel
2166 * where
2167 * line time = htotal / dotclock
2168 * and latency is assumed to be high, as above.
2169 *
2170 * The final value programmed to the register should always be rounded up,
2171 * and include an extra 2 entries to account for clock crossings.
2172 *
2173 * We don't use the sprite, so we can ignore that. And on Crestline we have
2174 * to set the non-SR watermarks to 8.
2175 */
2176static void intel_update_watermarks(struct drm_device *dev)
2177{
2178 struct drm_crtc *crtc;
2179 struct intel_crtc *intel_crtc;
2180 int sr_hdisplay = 0;
2181 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
2182 int enabled = 0, pixel_size = 0;
2183
2184 if (DSPARB_HWCONTROL(dev))
2185 return;
2186
2187 /* Get the clock config from both planes */
2188 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2189 intel_crtc = to_intel_crtc(crtc);
2190 if (crtc->enabled) {
2191 enabled++;
2192 if (intel_crtc->plane == 0) {
2193 DRM_DEBUG("plane A (pipe %d) clock: %d\n",
2194 intel_crtc->pipe, crtc->mode.clock);
2195 planea_clock = crtc->mode.clock;
2196 } else {
2197 DRM_DEBUG("plane B (pipe %d) clock: %d\n",
2198 intel_crtc->pipe, crtc->mode.clock);
2199 planeb_clock = crtc->mode.clock;
2200 }
2201 sr_hdisplay = crtc->mode.hdisplay;
2202 sr_clock = crtc->mode.clock;
2203 if (crtc->fb)
2204 pixel_size = crtc->fb->bits_per_pixel / 8;
2205 else
2206 pixel_size = 4; /* by default */
2207 }
2208 }
2209
2210 if (enabled <= 0)
2211 return;
2212
2213 /* Single plane configs can enable self refresh */
2214 if (enabled == 1 && IS_IGD(dev))
2215 igd_enable_cxsr(dev, sr_clock, pixel_size);
2216 else if (IS_IGD(dev))
2217 igd_disable_cxsr(dev);
2218
2219 if (IS_I965G(dev))
2220 i965_update_wm(dev);
2221 else if (IS_I9XX(dev) || IS_MOBILE(dev))
2222 i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
2223 pixel_size);
2224 else
2225 i830_update_wm(dev, planea_clock, pixel_size);
2226}
2227
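The comment block above gives both watermark formulas. The stand-alone sketch below is an illustration only (not driver code); every number in it is an assumed example value rather than an entry from any platform watermark table.

/* Sketch of the two watermark formulas documented above; illustration only. */
#include <stdio.h>

int main(void)
{
	/* Assumed example values */
	long long dotclock_khz    = 148500;	/* 1080p pixel clock */
	long long bytes_per_pixel = 4;
	long long latency_ns      = 12000;	/* pessimal memory latency */
	long long hdisplay        = 1920;
	long long htotal          = 2200;
	long long cacheline       = 64;		/* FIFO cacheline size in bytes */

	/* normal: watermark = dotclock * bytes per pixel * latency */
	long long entries = dotclock_khz * bytes_per_pixel * latency_ns / 1000000;

	/* SR: watermark = (trunc(latency/line time) + 1) * width * bytes per pixel,
	 * where line time = htotal / dotclock */
	long long line_time_ns = htotal * 1000000 / dotclock_khz;
	long long sr_entries = (latency_ns / line_time_ns + 1) * hdisplay * bytes_per_pixel;

	printf("normal: %lld bytes (%lld cachelines)\n",
	       entries, (entries + cacheline - 1) / cacheline);
	printf("self-refresh: %lld bytes (%lld cachelines)\n",
	       sr_entries, (sr_entries + cacheline - 1) / cacheline);
	return 0;
}

With these numbers the normal case needs about 7128 bytes (112 cachelines) and the self-refresh case about 7680 bytes (120 cachelines), before the extra entries for clock crossings mentioned above.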
1516static int intel_crtc_mode_set(struct drm_crtc *crtc, 2228static int intel_crtc_mode_set(struct drm_crtc *crtc,
1517 struct drm_display_mode *mode, 2229 struct drm_display_mode *mode,
1518 struct drm_display_mode *adjusted_mode, 2230 struct drm_display_mode *adjusted_mode,
@@ -1541,7 +2253,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1541 intel_clock_t clock; 2253 intel_clock_t clock;
1542 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 2254 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
1543 bool ok, is_sdvo = false, is_dvo = false; 2255 bool ok, is_sdvo = false, is_dvo = false;
 1544 	bool is_crt = false, is_lvds = false, is_tv = false;
 2256 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
2257 bool is_edp = false;
1545 struct drm_mode_config *mode_config = &dev->mode_config; 2258 struct drm_mode_config *mode_config = &dev->mode_config;
1546 struct drm_connector *connector; 2259 struct drm_connector *connector;
1547 const intel_limit_t *limit; 2260 const intel_limit_t *limit;
@@ -1557,6 +2270,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1557 int lvds_reg = LVDS; 2270 int lvds_reg = LVDS;
1558 u32 temp; 2271 u32 temp;
1559 int sdvo_pixel_multiply; 2272 int sdvo_pixel_multiply;
2273 int target_clock;
1560 2274
1561 drm_vblank_pre_modeset(dev, pipe); 2275 drm_vblank_pre_modeset(dev, pipe);
1562 2276
@@ -1585,6 +2299,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1585 case INTEL_OUTPUT_ANALOG: 2299 case INTEL_OUTPUT_ANALOG:
1586 is_crt = true; 2300 is_crt = true;
1587 break; 2301 break;
2302 case INTEL_OUTPUT_DISPLAYPORT:
2303 is_dp = true;
2304 break;
2305 case INTEL_OUTPUT_EDP:
2306 is_edp = true;
2307 break;
1588 } 2308 }
1589 2309
1590 num_outputs++; 2310 num_outputs++;
@@ -1600,6 +2320,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1600 } else { 2320 } else {
1601 refclk = 48000; 2321 refclk = 48000;
1602 } 2322 }
2323
1603 2324
1604 /* 2325 /*
1605 * Returns a set of divisors for the desired target clock with the given 2326 * Returns a set of divisors for the desired target clock with the given
@@ -1635,11 +2356,29 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1635 } 2356 }
1636 2357
1637 /* FDI link */ 2358 /* FDI link */
 1638 	if (IS_IGDNG(dev))
 1639 		igdng_compute_m_n(3, 4, /* lane num 4 */
 1640 				  adjusted_mode->clock,
 1641 				  270000, /* lane clock */
 1642 				  &m_n);
 2359 	if (IS_IGDNG(dev)) {
 2360 		int lane, link_bw;
 2361 		/* eDP doesn't require FDI link, so just set DP M/N
 2362 		   according to current link config */
 2363 		if (is_edp) {
2364 struct drm_connector *edp;
2365 target_clock = mode->clock;
2366 edp = intel_pipe_get_output(crtc);
2367 intel_edp_link_config(to_intel_output(edp),
2368 &lane, &link_bw);
2369 } else {
2370 /* DP over FDI requires target mode clock
2371 instead of link clock */
2372 if (is_dp)
2373 target_clock = mode->clock;
2374 else
2375 target_clock = adjusted_mode->clock;
2376 lane = 4;
2377 link_bw = 270000;
2378 }
2379 igdng_compute_m_n(3, lane, target_clock,
2380 link_bw, &m_n);
2381 }
1643 2382
1644 if (IS_IGD(dev)) 2383 if (IS_IGD(dev))
1645 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 2384 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
@@ -1662,6 +2401,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1662 else if (IS_IGDNG(dev)) 2401 else if (IS_IGDNG(dev))
1663 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 2402 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1664 } 2403 }
2404 if (is_dp)
2405 dpll |= DPLL_DVO_HIGH_SPEED;
1665 2406
1666 /* compute bitmask from p1 value */ 2407 /* compute bitmask from p1 value */
1667 if (IS_IGD(dev)) 2408 if (IS_IGD(dev))
@@ -1758,29 +2499,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1758 dpll_reg = pch_dpll_reg; 2499 dpll_reg = pch_dpll_reg;
1759 } 2500 }
1760 2501
 1761 	if (dpll & DPLL_VCO_ENABLE) {
 2502 	if (is_edp) {
2503 igdng_disable_pll_edp(crtc);
2504 } else if ((dpll & DPLL_VCO_ENABLE)) {
1762 I915_WRITE(fp_reg, fp); 2505 I915_WRITE(fp_reg, fp);
1763 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 2506 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
1764 I915_READ(dpll_reg); 2507 I915_READ(dpll_reg);
1765 udelay(150); 2508 udelay(150);
1766 } 2509 }
1767 2510
1768 if (IS_IGDNG(dev)) {
1769 /* enable PCH clock reference source */
1770 /* XXX need to change the setting for other outputs */
1771 u32 temp;
1772 temp = I915_READ(PCH_DREF_CONTROL);
1773 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
1774 temp |= DREF_NONSPREAD_CK505_ENABLE;
1775 temp &= ~DREF_SSC_SOURCE_MASK;
1776 temp |= DREF_SSC_SOURCE_ENABLE;
1777 temp &= ~DREF_SSC1_ENABLE;
1778 /* if no eDP, disable source output to CPU */
1779 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1780 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1781 I915_WRITE(PCH_DREF_CONTROL, temp);
1782 }
1783
1784 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 2511 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
1785 * This is an exception to the general rule that mode_set doesn't turn 2512 * This is an exception to the general rule that mode_set doesn't turn
1786 * things on. 2513 * things on.
@@ -1809,24 +2536,28 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1809 I915_WRITE(lvds_reg, lvds); 2536 I915_WRITE(lvds_reg, lvds);
1810 I915_READ(lvds_reg); 2537 I915_READ(lvds_reg);
1811 } 2538 }
2539 if (is_dp)
2540 intel_dp_set_m_n(crtc, mode, adjusted_mode);
1812 2541
 1813 	I915_WRITE(fp_reg, fp);
 1814 	I915_WRITE(dpll_reg, dpll);
 1815 	I915_READ(dpll_reg);
 1816 	/* Wait for the clocks to stabilize. */
 1817 	udelay(150);
 1818
 1819 	if (IS_I965G(dev) && !IS_IGDNG(dev)) {
 1820 		sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
 1821 		I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
 1822 			   ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
 1823 	} else {
 1824 		/* write it again -- the BIOS does, after all */
 1825 		I915_WRITE(dpll_reg, dpll);
 1826 	}
 1827 	I915_READ(dpll_reg);
 1828 	/* Wait for the clocks to stabilize. */
 1829 	udelay(150);
 2542 	if (!is_edp) {
 2543 		I915_WRITE(fp_reg, fp);
 2544 		I915_WRITE(dpll_reg, dpll);
 2545 		I915_READ(dpll_reg);
 2546 		/* Wait for the clocks to stabilize. */
 2547 		udelay(150);
 2548
 2549 		if (IS_I965G(dev) && !IS_IGDNG(dev)) {
 2550 			sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
 2551 			I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
 2552 				   ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
 2553 		} else {
 2554 			/* write it again -- the BIOS does, after all */
 2555 			I915_WRITE(dpll_reg, dpll);
 2556 		}
 2557 		I915_READ(dpll_reg);
 2558 		/* Wait for the clocks to stabilize. */
 2559 		udelay(150);
 2560 	}
1830 2561
1831 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 2562 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
1832 ((adjusted_mode->crtc_htotal - 1) << 16)); 2563 ((adjusted_mode->crtc_htotal - 1) << 16));
@@ -1856,10 +2587,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1856 I915_WRITE(link_m1_reg, m_n.link_m); 2587 I915_WRITE(link_m1_reg, m_n.link_m);
1857 I915_WRITE(link_n1_reg, m_n.link_n); 2588 I915_WRITE(link_n1_reg, m_n.link_n);
1858 2589
 1859 		/* enable FDI RX PLL too */
 1860 		temp = I915_READ(fdi_rx_reg);
 1861 		I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
 1862 		udelay(200);
 2590 		if (is_edp) {
 2591 			igdng_set_pll_edp(crtc, adjusted_mode->clock);
 2592 		} else {
 2593 			/* enable FDI RX PLL too */
2594 temp = I915_READ(fdi_rx_reg);
2595 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
2596 udelay(200);
2597 }
1863 } 2598 }
1864 2599
1865 I915_WRITE(pipeconf_reg, pipeconf); 2600 I915_WRITE(pipeconf_reg, pipeconf);
@@ -1871,6 +2606,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1871 2606
1872 /* Flush the plane changes */ 2607 /* Flush the plane changes */
1873 ret = intel_pipe_set_base(crtc, x, y, old_fb); 2608 ret = intel_pipe_set_base(crtc, x, y, old_fb);
2609
2610 intel_update_watermarks(dev);
2611
1874 drm_vblank_post_modeset(dev, pipe); 2612 drm_vblank_post_modeset(dev, pipe);
1875 2613
1876 return ret; 2614 return ret;
@@ -2359,6 +3097,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
2359 3097
2360 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 3098 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
2361 intel_crtc->pipe = pipe; 3099 intel_crtc->pipe = pipe;
3100 intel_crtc->plane = pipe;
2362 for (i = 0; i < 256; i++) { 3101 for (i = 0; i < 256; i++) {
2363 intel_crtc->lut_r[i] = i; 3102 intel_crtc->lut_r[i] = i;
2364 intel_crtc->lut_g[i] = i; 3103 intel_crtc->lut_g[i] = i;
@@ -2453,12 +3192,17 @@ static void intel_setup_outputs(struct drm_device *dev)
2453 if (IS_IGDNG(dev)) { 3192 if (IS_IGDNG(dev)) {
2454 int found; 3193 int found;
2455 3194
3195 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
3196 intel_dp_init(dev, DP_A);
3197
2456 if (I915_READ(HDMIB) & PORT_DETECTED) { 3198 if (I915_READ(HDMIB) & PORT_DETECTED) {
2457 /* check SDVOB */ 3199 /* check SDVOB */
2458 /* found = intel_sdvo_init(dev, HDMIB); */ 3200 /* found = intel_sdvo_init(dev, HDMIB); */
2459 found = 0; 3201 found = 0;
2460 if (!found) 3202 if (!found)
2461 intel_hdmi_init(dev, HDMIB); 3203 intel_hdmi_init(dev, HDMIB);
3204 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
3205 intel_dp_init(dev, PCH_DP_B);
2462 } 3206 }
2463 3207
2464 if (I915_READ(HDMIC) & PORT_DETECTED) 3208 if (I915_READ(HDMIC) & PORT_DETECTED)
@@ -2467,6 +3211,12 @@ static void intel_setup_outputs(struct drm_device *dev)
2467 if (I915_READ(HDMID) & PORT_DETECTED) 3211 if (I915_READ(HDMID) & PORT_DETECTED)
2468 intel_hdmi_init(dev, HDMID); 3212 intel_hdmi_init(dev, HDMID);
2469 3213
3214 if (I915_READ(PCH_DP_C) & DP_DETECTED)
3215 intel_dp_init(dev, PCH_DP_C);
3216
3217 if (I915_READ(PCH_DP_D) & DP_DETECTED)
3218 intel_dp_init(dev, PCH_DP_D);
3219
2470 } else if (IS_I9XX(dev)) { 3220 } else if (IS_I9XX(dev)) {
2471 int found; 3221 int found;
2472 u32 reg; 3222 u32 reg;
@@ -2475,6 +3225,8 @@ static void intel_setup_outputs(struct drm_device *dev)
2475 found = intel_sdvo_init(dev, SDVOB); 3225 found = intel_sdvo_init(dev, SDVOB);
2476 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 3226 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
2477 intel_hdmi_init(dev, SDVOB); 3227 intel_hdmi_init(dev, SDVOB);
3228 if (!found && SUPPORTS_INTEGRATED_DP(dev))
3229 intel_dp_init(dev, DP_B);
2478 } 3230 }
2479 3231
2480 /* Before G4X SDVOC doesn't have its own detect register */ 3232 /* Before G4X SDVOC doesn't have its own detect register */
@@ -2487,7 +3239,11 @@ static void intel_setup_outputs(struct drm_device *dev)
2487 found = intel_sdvo_init(dev, SDVOC); 3239 found = intel_sdvo_init(dev, SDVOC);
2488 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 3240 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
2489 intel_hdmi_init(dev, SDVOC); 3241 intel_hdmi_init(dev, SDVOC);
3242 if (!found && SUPPORTS_INTEGRATED_DP(dev))
3243 intel_dp_init(dev, DP_C);
2490 } 3244 }
3245 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
3246 intel_dp_init(dev, DP_D);
2491 } else 3247 } else
2492 intel_dvo_init(dev); 3248 intel_dvo_init(dev);
2493 3249
@@ -2530,6 +3286,15 @@ static void intel_setup_outputs(struct drm_device *dev)
2530 (1 << 1)); 3286 (1 << 1));
2531 clone_mask = (1 << INTEL_OUTPUT_TVOUT); 3287 clone_mask = (1 << INTEL_OUTPUT_TVOUT);
2532 break; 3288 break;
3289 case INTEL_OUTPUT_DISPLAYPORT:
3290 crtc_mask = ((1 << 0) |
3291 (1 << 1));
3292 clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
3293 break;
3294 case INTEL_OUTPUT_EDP:
3295 crtc_mask = (1 << 1);
3296 clone_mask = (1 << INTEL_OUTPUT_EDP);
3297 break;
2533 } 3298 }
2534 encoder->possible_crtcs = crtc_mask; 3299 encoder->possible_crtcs = crtc_mask;
2535 encoder->possible_clones = intel_connector_clones(dev, clone_mask); 3300 encoder->possible_clones = intel_connector_clones(dev, clone_mask);
@@ -2639,6 +3404,9 @@ void intel_modeset_init(struct drm_device *dev)
2639 if (IS_I965G(dev)) { 3404 if (IS_I965G(dev)) {
2640 dev->mode_config.max_width = 8192; 3405 dev->mode_config.max_width = 8192;
2641 dev->mode_config.max_height = 8192; 3406 dev->mode_config.max_height = 8192;
3407 } else if (IS_I9XX(dev)) {
3408 dev->mode_config.max_width = 4096;
3409 dev->mode_config.max_height = 4096;
2642 } else { 3410 } else {
2643 dev->mode_config.max_width = 2048; 3411 dev->mode_config.max_width = 2048;
2644 dev->mode_config.max_height = 2048; 3412 dev->mode_config.max_height = 2048;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 000000000000..a6ff15ac548a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,1318 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include "drmP.h"
30#include "drm.h"
31#include "drm_crtc.h"
32#include "drm_crtc_helper.h"
33#include "intel_drv.h"
34#include "i915_drm.h"
35#include "i915_drv.h"
36#include "intel_dp.h"
37
38#define DP_LINK_STATUS_SIZE 6
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40
41#define DP_LINK_CONFIGURATION_SIZE 9
42
43#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
44
45struct intel_dp_priv {
46 uint32_t output_reg;
47 uint32_t DP;
48 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
49 uint32_t save_DP;
50 uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
51 bool has_audio;
52 int dpms_mode;
53 uint8_t link_bw;
54 uint8_t lane_count;
55 uint8_t dpcd[4];
56 struct intel_output *intel_output;
57 struct i2c_adapter adapter;
58 struct i2c_algo_dp_aux_data algo;
59};
60
61static void
62intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
63 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
64
65static void
66intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
67
68void
69intel_edp_link_config (struct intel_output *intel_output,
70 int *lane_num, int *link_bw)
71{
72 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
73
74 *lane_num = dp_priv->lane_count;
75 if (dp_priv->link_bw == DP_LINK_BW_1_62)
76 *link_bw = 162000;
77 else if (dp_priv->link_bw == DP_LINK_BW_2_7)
78 *link_bw = 270000;
79}
80
81static int
82intel_dp_max_lane_count(struct intel_output *intel_output)
83{
84 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
85 int max_lane_count = 4;
86
87 if (dp_priv->dpcd[0] >= 0x11) {
88 max_lane_count = dp_priv->dpcd[2] & 0x1f;
89 switch (max_lane_count) {
90 case 1: case 2: case 4:
91 break;
92 default:
93 max_lane_count = 4;
94 }
95 }
96 return max_lane_count;
97}
98
99static int
100intel_dp_max_link_bw(struct intel_output *intel_output)
101{
102 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
103 int max_link_bw = dp_priv->dpcd[1];
104
105 switch (max_link_bw) {
106 case DP_LINK_BW_1_62:
107 case DP_LINK_BW_2_7:
108 break;
109 default:
110 max_link_bw = DP_LINK_BW_1_62;
111 break;
112 }
113 return max_link_bw;
114}
115
116static int
117intel_dp_link_clock(uint8_t link_bw)
118{
119 if (link_bw == DP_LINK_BW_2_7)
120 return 270000;
121 else
122 return 162000;
123}
124
125/* I think this is a fiction */
126static int
127intel_dp_link_required(int pixel_clock)
128{
129 return pixel_clock * 3;
130}
131
132static int
133intel_dp_mode_valid(struct drm_connector *connector,
134 struct drm_display_mode *mode)
135{
136 struct intel_output *intel_output = to_intel_output(connector);
137 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
138 int max_lanes = intel_dp_max_lane_count(intel_output);
139
140 if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
141 return MODE_CLOCK_HIGH;
142
143 if (mode->clock < 10000)
144 return MODE_CLOCK_LOW;
145
146 return MODE_OK;
147}
148
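A quick worked example of the check above (numbers assumed, not taken from the driver): a 1920x1200@60 mode has a pixel clock of roughly 154000 kHz, so intel_dp_link_required() asks for 154000 * 3 = 462000; a 2.7 GHz link with 4 lanes offers 270000 * 4 = 1080000 and accepts the mode, while a single 1.62 GHz lane offers only 162000 and would reject it. A minimal sketch of the same test:

/* Illustration only: mirrors the bandwidth test in intel_dp_mode_valid()
 * with assumed example numbers. */
static int example_mode_fits(int pixel_clock_khz, int link_clock_khz, int lanes)
{
	/* required = pixel clock * 3, as in intel_dp_link_required() */
	return pixel_clock_khz * 3 <= link_clock_khz * lanes;
}

/* example_mode_fits(154000, 270000, 4) -> 1 (462000 <= 1080000)
 * example_mode_fits(154000, 162000, 1) -> 0 (462000 >  162000) */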
149static uint32_t
150pack_aux(uint8_t *src, int src_bytes)
151{
152 int i;
153 uint32_t v = 0;
154
155 if (src_bytes > 4)
156 src_bytes = 4;
157 for (i = 0; i < src_bytes; i++)
158 v |= ((uint32_t) src[i]) << ((3-i) * 8);
159 return v;
160}
161
162static void
163unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
164{
165 int i;
166 if (dst_bytes > 4)
167 dst_bytes = 4;
168 for (i = 0; i < dst_bytes; i++)
169 dst[i] = src >> ((3-i) * 8);
170}
171
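For reference, pack_aux() places the first source byte in the most significant position of the 32-bit AUX data register and unpack_aux() reverses it; a small assumed example:

/* Illustration only (assumed values):
 *   uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *   pack_aux(buf, 4) == 0x12345678
 *   unpack_aux(0x12345678, buf, 4) restores the original four bytes. */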
172/* hrawclock is 1/4 the FSB frequency */
173static int
174intel_hrawclk(struct drm_device *dev)
175{
176 struct drm_i915_private *dev_priv = dev->dev_private;
177 uint32_t clkcfg;
178
179 clkcfg = I915_READ(CLKCFG);
180 switch (clkcfg & CLKCFG_FSB_MASK) {
181 case CLKCFG_FSB_400:
182 return 100;
183 case CLKCFG_FSB_533:
184 return 133;
185 case CLKCFG_FSB_667:
186 return 166;
187 case CLKCFG_FSB_800:
188 return 200;
189 case CLKCFG_FSB_1067:
190 return 266;
191 case CLKCFG_FSB_1333:
192 return 333;
193 /* these two are just a guess; one of them might be right */
194 case CLKCFG_FSB_1600:
195 case CLKCFG_FSB_1600_ALT:
196 return 400;
197 default:
198 return 133;
199 }
200}
201
202static int
203intel_dp_aux_ch(struct intel_output *intel_output,
204 uint8_t *send, int send_bytes,
205 uint8_t *recv, int recv_size)
206{
207 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
208 uint32_t output_reg = dp_priv->output_reg;
209 struct drm_device *dev = intel_output->base.dev;
210 struct drm_i915_private *dev_priv = dev->dev_private;
211 uint32_t ch_ctl = output_reg + 0x10;
212 uint32_t ch_data = ch_ctl + 4;
213 int i;
214 int recv_bytes;
215 uint32_t ctl;
216 uint32_t status;
217 uint32_t aux_clock_divider;
218 int try;
219
 220 	/* The clock divider is based on the hrawclk and should run the AUX
 221 	 * bit clock at roughly 2MHz, so take the hrawclk value and divide
 222 	 * it by 2 and use that
 223 	 */
224 if (IS_eDP(intel_output))
225 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
226 else if (IS_IGDNG(dev))
227 aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */
228 else
229 aux_clock_divider = intel_hrawclk(dev) / 2;
230
 231 	/* Must try at least 3 times according to DP spec; we allow up to 5 */
232 for (try = 0; try < 5; try++) {
233 /* Load the send data into the aux channel data registers */
234 for (i = 0; i < send_bytes; i += 4) {
 235 			uint32_t d = pack_aux(send + i, send_bytes - i);
236
237 I915_WRITE(ch_data + i, d);
238 }
239
240 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
241 DP_AUX_CH_CTL_TIME_OUT_400us |
242 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
243 (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
244 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
245 DP_AUX_CH_CTL_DONE |
246 DP_AUX_CH_CTL_TIME_OUT_ERROR |
247 DP_AUX_CH_CTL_RECEIVE_ERROR);
248
249 /* Send the command and wait for it to complete */
250 I915_WRITE(ch_ctl, ctl);
251 (void) I915_READ(ch_ctl);
252 for (;;) {
253 udelay(100);
254 status = I915_READ(ch_ctl);
255 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
256 break;
257 }
258
259 /* Clear done status and any errors */
260 I915_WRITE(ch_ctl, (status |
261 DP_AUX_CH_CTL_DONE |
262 DP_AUX_CH_CTL_TIME_OUT_ERROR |
263 DP_AUX_CH_CTL_RECEIVE_ERROR));
264 (void) I915_READ(ch_ctl);
265 if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
266 break;
267 }
268
269 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
270 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
271 return -EBUSY;
272 }
273
274 /* Check for timeout or receive error.
275 * Timeouts occur when the sink is not connected
276 */
277 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
278 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
279 return -EIO;
280 }
281
282 /* Timeouts occur when the device isn't connected, so they're
283 * "normal" -- don't fill the kernel log with these */
284 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
285 DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
286 return -ETIMEDOUT;
287 }
288
289 /* Unload any bytes sent back from the other side */
290 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
291 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
292
293 if (recv_bytes > recv_size)
294 recv_bytes = recv_size;
295
296 for (i = 0; i < recv_bytes; i += 4) {
297 uint32_t d = I915_READ(ch_data + i);
298
299 unpack_aux(d, recv + i, recv_bytes - i);
300 }
301
302 return recv_bytes;
303}
304
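The divider comment inside intel_dp_aux_ch() ties into the intel_hrawclk() table above. A minimal sketch of that selection (illustration only; the fixed eDP/IGDNG values are the ones used in the function above):

/* Illustration only: AUX clock divider selection as described above. */
static int example_aux_clock_divider(int hrawclk, int is_edp, int is_igdng)
{
	if (is_edp)
		return 225;	/* eDP input clock at 450MHz */
	if (is_igdng)
		return 62;	/* input clock fixed at 125MHz */
	return hrawclk / 2;	/* aim the AUX bit clock at roughly 2MHz */
}

/* e.g. an 800MHz FSB gives hrawclk == 200 per intel_hrawclk(), so the
 * divider comes out as 100. */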
305/* Write data to the aux channel in native mode */
306static int
307intel_dp_aux_native_write(struct intel_output *intel_output,
308 uint16_t address, uint8_t *send, int send_bytes)
309{
310 int ret;
311 uint8_t msg[20];
312 int msg_bytes;
313 uint8_t ack;
314
315 if (send_bytes > 16)
316 return -1;
317 msg[0] = AUX_NATIVE_WRITE << 4;
318 msg[1] = address >> 8;
319 msg[2] = address & 0xff;
320 msg[3] = send_bytes - 1;
321 memcpy(&msg[4], send, send_bytes);
322 msg_bytes = send_bytes + 4;
323 for (;;) {
324 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
325 if (ret < 0)
326 return ret;
327 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
328 break;
329 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
330 udelay(100);
331 else
332 return -EIO;
333 }
334 return send_bytes;
335}
336
337/* Write a single byte to the aux channel in native mode */
338static int
339intel_dp_aux_native_write_1(struct intel_output *intel_output,
340 uint16_t address, uint8_t byte)
341{
342 return intel_dp_aux_native_write(intel_output, address, &byte, 1);
343}
344
345/* read bytes from a native aux channel */
346static int
347intel_dp_aux_native_read(struct intel_output *intel_output,
348 uint16_t address, uint8_t *recv, int recv_bytes)
349{
350 uint8_t msg[4];
351 int msg_bytes;
352 uint8_t reply[20];
353 int reply_bytes;
354 uint8_t ack;
355 int ret;
356
357 msg[0] = AUX_NATIVE_READ << 4;
358 msg[1] = address >> 8;
359 msg[2] = address & 0xff;
360 msg[3] = recv_bytes - 1;
361
362 msg_bytes = 4;
363 reply_bytes = recv_bytes + 1;
364
365 for (;;) {
366 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
367 reply, reply_bytes);
368 if (ret == 0)
369 return -EPROTO;
370 if (ret < 0)
371 return ret;
372 ack = reply[0];
373 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
374 memcpy(recv, reply + 1, ret - 1);
375 return ret - 1;
376 }
377 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
378 udelay(100);
379 else
380 return -EIO;
381 }
382}
383
384static int
385intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
386 uint8_t *send, int send_bytes,
387 uint8_t *recv, int recv_bytes)
388{
389 struct intel_dp_priv *dp_priv = container_of(adapter,
390 struct intel_dp_priv,
391 adapter);
392 struct intel_output *intel_output = dp_priv->intel_output;
393
394 return intel_dp_aux_ch(intel_output,
395 send, send_bytes, recv, recv_bytes);
396}
397
398static int
399intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
400{
401 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
402
 403 	DRM_DEBUG("i2c_init %s\n", name);
404 dp_priv->algo.running = false;
405 dp_priv->algo.address = 0;
406 dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
407
408 memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
409 dp_priv->adapter.owner = THIS_MODULE;
410 dp_priv->adapter.class = I2C_CLASS_DDC;
411 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
412 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
413 dp_priv->adapter.algo_data = &dp_priv->algo;
414 dp_priv->adapter.dev.parent = &intel_output->base.kdev;
415
416 return i2c_dp_aux_add_bus(&dp_priv->adapter);
417}
418
419static bool
420intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
421 struct drm_display_mode *adjusted_mode)
422{
423 struct intel_output *intel_output = enc_to_intel_output(encoder);
424 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
425 int lane_count, clock;
426 int max_lane_count = intel_dp_max_lane_count(intel_output);
427 int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
428 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
429
430 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
431 for (clock = 0; clock <= max_clock; clock++) {
432 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
433
434 if (intel_dp_link_required(mode->clock) <= link_avail) {
435 dp_priv->link_bw = bws[clock];
436 dp_priv->lane_count = lane_count;
437 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
438 DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
439 dp_priv->link_bw, dp_priv->lane_count,
440 adjusted_mode->clock);
441 return true;
442 }
443 }
444 }
445 return false;
446}
447
448struct intel_dp_m_n {
449 uint32_t tu;
450 uint32_t gmch_m;
451 uint32_t gmch_n;
452 uint32_t link_m;
453 uint32_t link_n;
454};
455
456static void
457intel_reduce_ratio(uint32_t *num, uint32_t *den)
458{
459 while (*num > 0xffffff || *den > 0xffffff) {
460 *num >>= 1;
461 *den >>= 1;
462 }
463}
464
465static void
466intel_dp_compute_m_n(int bytes_per_pixel,
467 int nlanes,
468 int pixel_clock,
469 int link_clock,
470 struct intel_dp_m_n *m_n)
471{
472 m_n->tu = 64;
473 m_n->gmch_m = pixel_clock * bytes_per_pixel;
474 m_n->gmch_n = link_clock * nlanes;
475 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
476 m_n->link_m = pixel_clock;
477 m_n->link_n = link_clock;
478 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
479}
480
481void
482intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
483 struct drm_display_mode *adjusted_mode)
484{
485 struct drm_device *dev = crtc->dev;
486 struct drm_mode_config *mode_config = &dev->mode_config;
487 struct drm_connector *connector;
488 struct drm_i915_private *dev_priv = dev->dev_private;
489 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
490 int lane_count = 4;
491 struct intel_dp_m_n m_n;
492
493 /*
494 * Find the lane count in the intel_output private
495 */
496 list_for_each_entry(connector, &mode_config->connector_list, head) {
497 struct intel_output *intel_output = to_intel_output(connector);
498 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
499
500 if (!connector->encoder || connector->encoder->crtc != crtc)
501 continue;
502
503 if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
504 lane_count = dp_priv->lane_count;
505 break;
506 }
507 }
508
509 /*
510 * Compute the GMCH and Link ratios. The '3' here is
511 * the number of bytes_per_pixel post-LUT, which we always
512 * set up for 8-bits of R/G/B, or 3 bytes total.
513 */
514 intel_dp_compute_m_n(3, lane_count,
515 mode->clock, adjusted_mode->clock, &m_n);
516
517 if (IS_IGDNG(dev)) {
518 if (intel_crtc->pipe == 0) {
519 I915_WRITE(TRANSA_DATA_M1,
520 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
521 m_n.gmch_m);
522 I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
523 I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
524 I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
525 } else {
526 I915_WRITE(TRANSB_DATA_M1,
527 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
528 m_n.gmch_m);
529 I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
530 I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
531 I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
532 }
533 } else {
534 if (intel_crtc->pipe == 0) {
535 I915_WRITE(PIPEA_GMCH_DATA_M,
536 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
537 m_n.gmch_m);
538 I915_WRITE(PIPEA_GMCH_DATA_N,
539 m_n.gmch_n);
540 I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
541 I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
542 } else {
543 I915_WRITE(PIPEB_GMCH_DATA_M,
544 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
545 m_n.gmch_m);
546 I915_WRITE(PIPEB_GMCH_DATA_N,
547 m_n.gmch_n);
548 I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
549 I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
550 }
551 }
552}
553
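As a concrete illustration of intel_dp_compute_m_n() (assumed example numbers, 3 bytes per pixel as explained in the comment above): pixel_clock = 154000 and link_clock = 270000 with 4 lanes give gmch_m/gmch_n = 462000/1080000 and link_m/link_n = 154000/270000; both ratios already fit in 24 bits, so intel_reduce_ratio() leaves them untouched.

/* Illustration only: the same ratio computation with assumed numbers. */
struct example_m_n { unsigned int tu, gmch_m, gmch_n, link_m, link_n; };

static void example_compute_m_n(int pixel_clock, int link_clock, int nlanes,
				struct example_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = pixel_clock * 3;		/* 3 bytes per pixel post-LUT */
	m_n->gmch_n = link_clock * nlanes;
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	/* intel_reduce_ratio() would now shift both halves of each ratio down
	 * until they fit in 24 bits; with these values nothing changes. */
}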
554static void
555intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
556 struct drm_display_mode *adjusted_mode)
557{
558 struct intel_output *intel_output = enc_to_intel_output(encoder);
559 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
560 struct drm_crtc *crtc = intel_output->enc.crtc;
561 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
562
563 dp_priv->DP = (DP_LINK_TRAIN_OFF |
564 DP_VOLTAGE_0_4 |
565 DP_PRE_EMPHASIS_0 |
566 DP_SYNC_VS_HIGH |
567 DP_SYNC_HS_HIGH);
568
569 switch (dp_priv->lane_count) {
570 case 1:
571 dp_priv->DP |= DP_PORT_WIDTH_1;
572 break;
573 case 2:
574 dp_priv->DP |= DP_PORT_WIDTH_2;
575 break;
576 case 4:
577 dp_priv->DP |= DP_PORT_WIDTH_4;
578 break;
579 }
580 if (dp_priv->has_audio)
581 dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
582
583 memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
584 dp_priv->link_configuration[0] = dp_priv->link_bw;
585 dp_priv->link_configuration[1] = dp_priv->lane_count;
586
587 /*
 588 	 * Check for DPCD version 1.1 or newer and
 589 	 * enable enhanced framing in that case
590 */
591 if (dp_priv->dpcd[0] >= 0x11) {
592 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
593 dp_priv->DP |= DP_ENHANCED_FRAMING;
594 }
595
596 if (intel_crtc->pipe == 1)
597 dp_priv->DP |= DP_PIPEB_SELECT;
598
599 if (IS_eDP(intel_output)) {
 600 		/* eDP always needs the PLL enabled and a PLL frequency selected */
601 dp_priv->DP |= DP_PLL_ENABLE;
602 if (adjusted_mode->clock < 200000)
603 dp_priv->DP |= DP_PLL_FREQ_160MHZ;
604 else
605 dp_priv->DP |= DP_PLL_FREQ_270MHZ;
606 }
607}
608
609static void igdng_edp_backlight_on (struct drm_device *dev)
610{
611 struct drm_i915_private *dev_priv = dev->dev_private;
612 u32 pp;
613
614 DRM_DEBUG("\n");
615 pp = I915_READ(PCH_PP_CONTROL);
616 pp |= EDP_BLC_ENABLE;
617 I915_WRITE(PCH_PP_CONTROL, pp);
618}
619
620static void igdng_edp_backlight_off (struct drm_device *dev)
621{
622 struct drm_i915_private *dev_priv = dev->dev_private;
623 u32 pp;
624
625 DRM_DEBUG("\n");
626 pp = I915_READ(PCH_PP_CONTROL);
627 pp &= ~EDP_BLC_ENABLE;
628 I915_WRITE(PCH_PP_CONTROL, pp);
629}
630
631static void
632intel_dp_dpms(struct drm_encoder *encoder, int mode)
633{
634 struct intel_output *intel_output = enc_to_intel_output(encoder);
635 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
636 struct drm_device *dev = intel_output->base.dev;
637 struct drm_i915_private *dev_priv = dev->dev_private;
638 uint32_t dp_reg = I915_READ(dp_priv->output_reg);
639
640 if (mode != DRM_MODE_DPMS_ON) {
641 if (dp_reg & DP_PORT_EN) {
642 intel_dp_link_down(intel_output, dp_priv->DP);
643 if (IS_eDP(intel_output))
644 igdng_edp_backlight_off(dev);
645 }
646 } else {
647 if (!(dp_reg & DP_PORT_EN)) {
648 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
649 if (IS_eDP(intel_output))
650 igdng_edp_backlight_on(dev);
651 }
652 }
653 dp_priv->dpms_mode = mode;
654}
655
656/*
657 * Fetch AUX CH registers 0x202 - 0x207 which contain
658 * link status information
659 */
660static bool
661intel_dp_get_link_status(struct intel_output *intel_output,
662 uint8_t link_status[DP_LINK_STATUS_SIZE])
663{
664 int ret;
665
666 ret = intel_dp_aux_native_read(intel_output,
667 DP_LANE0_1_STATUS,
668 link_status, DP_LINK_STATUS_SIZE);
669 if (ret != DP_LINK_STATUS_SIZE)
670 return false;
671 return true;
672}
673
674static uint8_t
675intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
676 int r)
677{
678 return link_status[r - DP_LANE0_1_STATUS];
679}
680
681static void
682intel_dp_save(struct drm_connector *connector)
683{
684 struct intel_output *intel_output = to_intel_output(connector);
685 struct drm_device *dev = intel_output->base.dev;
686 struct drm_i915_private *dev_priv = dev->dev_private;
687 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
688
689 dp_priv->save_DP = I915_READ(dp_priv->output_reg);
690 intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
691 dp_priv->save_link_configuration,
692 sizeof (dp_priv->save_link_configuration));
693}
694
695static uint8_t
696intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
697 int lane)
698{
699 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
700 int s = ((lane & 1) ?
701 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
702 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
703 uint8_t l = intel_dp_link_status(link_status, i);
704
705 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
706}
707
708static uint8_t
709intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
710 int lane)
711{
712 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
713 int s = ((lane & 1) ?
714 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
715 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
716 uint8_t l = intel_dp_link_status(link_status, i);
717
718 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
719}
720
721
722#if 0
723static char *voltage_names[] = {
724 "0.4V", "0.6V", "0.8V", "1.2V"
725};
726static char *pre_emph_names[] = {
727 "0dB", "3.5dB", "6dB", "9.5dB"
728};
729static char *link_train_names[] = {
730 "pattern 1", "pattern 2", "idle", "off"
731};
732#endif
733
734/*
735 * These are source-specific values; current Intel hardware supports
736 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
737 */
738#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
739
740static uint8_t
741intel_dp_pre_emphasis_max(uint8_t voltage_swing)
742{
743 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
744 case DP_TRAIN_VOLTAGE_SWING_400:
745 return DP_TRAIN_PRE_EMPHASIS_6;
746 case DP_TRAIN_VOLTAGE_SWING_600:
747 return DP_TRAIN_PRE_EMPHASIS_6;
748 case DP_TRAIN_VOLTAGE_SWING_800:
749 return DP_TRAIN_PRE_EMPHASIS_3_5;
750 case DP_TRAIN_VOLTAGE_SWING_1200:
751 default:
752 return DP_TRAIN_PRE_EMPHASIS_0;
753 }
754}
755
756static void
757intel_get_adjust_train(struct intel_output *intel_output,
758 uint8_t link_status[DP_LINK_STATUS_SIZE],
759 int lane_count,
760 uint8_t train_set[4])
761{
762 uint8_t v = 0;
763 uint8_t p = 0;
764 int lane;
765
766 for (lane = 0; lane < lane_count; lane++) {
767 uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
768 uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
769
770 if (this_v > v)
771 v = this_v;
772 if (this_p > p)
773 p = this_p;
774 }
775
776 if (v >= I830_DP_VOLTAGE_MAX)
777 v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
778
779 if (p >= intel_dp_pre_emphasis_max(v))
780 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
781
782 for (lane = 0; lane < 4; lane++)
783 train_set[lane] = v | p;
784}
785
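The two helpers above unpack DPCD registers 0x206/0x207, which carry two lanes per byte. A hypothetical adjust-request value of 0x26 would decode as follows (illustration only):

/* Illustration only: decoding an assumed DP_ADJUST_REQUEST_LANE0_1 value. */
static void example_decode_adjust_request(void)
{
	unsigned char v = 0x26;				/* hypothetical DPCD 0x206 value */
	unsigned int lane0_swing   = (v >> 0) & 3;	/* 2 -> 800 mV */
	unsigned int lane0_preemph = (v >> 2) & 3;	/* 1 -> 3.5 dB */
	unsigned int lane1_swing   = (v >> 4) & 3;	/* 2 -> 800 mV */
	unsigned int lane1_preemph = (v >> 6) & 3;	/* 0 -> 0 dB   */
	(void)lane0_swing; (void)lane0_preemph;
	(void)lane1_swing; (void)lane1_preemph;
}

intel_get_adjust_train() then clamps such requests to the source limits (800 mV swing, 6 dB pre-emphasis) before writing the new train_set.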
786static uint32_t
787intel_dp_signal_levels(uint8_t train_set, int lane_count)
788{
789 uint32_t signal_levels = 0;
790
791 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
792 case DP_TRAIN_VOLTAGE_SWING_400:
793 default:
794 signal_levels |= DP_VOLTAGE_0_4;
795 break;
796 case DP_TRAIN_VOLTAGE_SWING_600:
797 signal_levels |= DP_VOLTAGE_0_6;
798 break;
799 case DP_TRAIN_VOLTAGE_SWING_800:
800 signal_levels |= DP_VOLTAGE_0_8;
801 break;
802 case DP_TRAIN_VOLTAGE_SWING_1200:
803 signal_levels |= DP_VOLTAGE_1_2;
804 break;
805 }
806 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
807 case DP_TRAIN_PRE_EMPHASIS_0:
808 default:
809 signal_levels |= DP_PRE_EMPHASIS_0;
810 break;
811 case DP_TRAIN_PRE_EMPHASIS_3_5:
812 signal_levels |= DP_PRE_EMPHASIS_3_5;
813 break;
814 case DP_TRAIN_PRE_EMPHASIS_6:
815 signal_levels |= DP_PRE_EMPHASIS_6;
816 break;
817 case DP_TRAIN_PRE_EMPHASIS_9_5:
818 signal_levels |= DP_PRE_EMPHASIS_9_5;
819 break;
820 }
821 return signal_levels;
822}
823
824static uint8_t
825intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
826 int lane)
827{
828 int i = DP_LANE0_1_STATUS + (lane >> 1);
829 int s = (lane & 1) * 4;
830 uint8_t l = intel_dp_link_status(link_status, i);
831
832 return (l >> s) & 0xf;
833}
834
 835/* Check whether clock recovery is done on all lanes */
836static bool
837intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
838{
839 int lane;
840 uint8_t lane_status;
841
842 for (lane = 0; lane < lane_count; lane++) {
843 lane_status = intel_get_lane_status(link_status, lane);
844 if ((lane_status & DP_LANE_CR_DONE) == 0)
845 return false;
846 }
847 return true;
848}
849
 850/* Check to see if channel equalization is done on all lanes */
851#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
852 DP_LANE_CHANNEL_EQ_DONE|\
853 DP_LANE_SYMBOL_LOCKED)
854static bool
855intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
856{
857 uint8_t lane_align;
858 uint8_t lane_status;
859 int lane;
860
861 lane_align = intel_dp_link_status(link_status,
862 DP_LANE_ALIGN_STATUS_UPDATED);
863 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
864 return false;
865 for (lane = 0; lane < lane_count; lane++) {
866 lane_status = intel_get_lane_status(link_status, lane);
867 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
868 return false;
869 }
870 return true;
871}
872
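For reference, DP_LANE0_1_STATUS (and DP_LANE2_3_STATUS) pack two lanes per byte, low nibble first, which is what intel_get_lane_status() extracts. An assumed example:

/* Illustration only (assumed value): a lane status byte of 0x77 means both
 * lanes report DP_LANE_CR_DONE | DP_LANE_CHANNEL_EQ_DONE | DP_LANE_SYMBOL_LOCKED:
 *   (0x77 >> 0) & 0xf == 0x7   lane 0 fully trained
 *   (0x77 >> 4) & 0xf == 0x7   lane 1 fully trained
 * so both intel_clock_recovery_ok() and intel_channel_eq_ok() would pass,
 * provided the interlane-align bit is also set. */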
873static bool
874intel_dp_set_link_train(struct intel_output *intel_output,
875 uint32_t dp_reg_value,
876 uint8_t dp_train_pat,
877 uint8_t train_set[4],
878 bool first)
879{
880 struct drm_device *dev = intel_output->base.dev;
881 struct drm_i915_private *dev_priv = dev->dev_private;
882 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
883 int ret;
884
885 I915_WRITE(dp_priv->output_reg, dp_reg_value);
886 POSTING_READ(dp_priv->output_reg);
887 if (first)
888 intel_wait_for_vblank(dev);
889
890 intel_dp_aux_native_write_1(intel_output,
891 DP_TRAINING_PATTERN_SET,
892 dp_train_pat);
893
894 ret = intel_dp_aux_native_write(intel_output,
895 DP_TRAINING_LANE0_SET, train_set, 4);
896 if (ret != 4)
897 return false;
898
899 return true;
900}
901
902static void
903intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
904 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
905{
906 struct drm_device *dev = intel_output->base.dev;
907 struct drm_i915_private *dev_priv = dev->dev_private;
908 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
909 uint8_t train_set[4];
910 uint8_t link_status[DP_LINK_STATUS_SIZE];
911 int i;
912 uint8_t voltage;
913 bool clock_recovery = false;
914 bool channel_eq = false;
915 bool first = true;
916 int tries;
917
918 /* Write the link configuration data */
919 intel_dp_aux_native_write(intel_output, 0x100,
920 link_configuration, DP_LINK_CONFIGURATION_SIZE);
921
922 DP |= DP_PORT_EN;
923 DP &= ~DP_LINK_TRAIN_MASK;
924 memset(train_set, 0, 4);
925 voltage = 0xff;
926 tries = 0;
927 clock_recovery = false;
928 for (;;) {
929 /* Use train_set[0] to set the voltage and pre emphasis values */
930 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
931 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
932
933 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
934 DP_TRAINING_PATTERN_1, train_set, first))
935 break;
936 first = false;
937 /* Set training pattern 1 */
938
939 udelay(100);
940 if (!intel_dp_get_link_status(intel_output, link_status))
941 break;
942
943 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
944 clock_recovery = true;
945 break;
946 }
947
948 /* Check to see if we've tried the max voltage */
949 for (i = 0; i < dp_priv->lane_count; i++)
950 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
951 break;
952 if (i == dp_priv->lane_count)
953 break;
954
955 /* Check to see if we've tried the same voltage 5 times */
956 if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
957 ++tries;
958 if (tries == 5)
959 break;
960 } else
961 tries = 0;
962 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
963
964 /* Compute new train_set as requested by target */
965 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
966 }
967
968 /* channel equalization */
969 tries = 0;
970 channel_eq = false;
971 for (;;) {
972 /* Use train_set[0] to set the voltage and pre emphasis values */
973 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
974 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
975
976 /* channel eq pattern */
977 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
978 DP_TRAINING_PATTERN_2, train_set,
979 false))
980 break;
981
982 udelay(400);
983 if (!intel_dp_get_link_status(intel_output, link_status))
984 break;
985
986 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
987 channel_eq = true;
988 break;
989 }
990
991 /* Try 5 times */
992 if (tries > 5)
993 break;
994
995 /* Compute new train_set as requested by target */
996 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
997 ++tries;
998 }
999
1000 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
1001 POSTING_READ(dp_priv->output_reg);
1002 intel_dp_aux_native_write_1(intel_output,
1003 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1004}
1005
1006static void
1007intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1008{
1009 struct drm_device *dev = intel_output->base.dev;
1010 struct drm_i915_private *dev_priv = dev->dev_private;
1011 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1012
1013 DRM_DEBUG("\n");
1014
1015 if (IS_eDP(intel_output)) {
1016 DP &= ~DP_PLL_ENABLE;
1017 I915_WRITE(dp_priv->output_reg, DP);
1018 POSTING_READ(dp_priv->output_reg);
1019 udelay(100);
1020 }
1021
1022 DP &= ~DP_LINK_TRAIN_MASK;
1023 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1024 POSTING_READ(dp_priv->output_reg);
1025
1026 udelay(17000);
1027
1028 if (IS_eDP(intel_output))
1029 DP |= DP_LINK_TRAIN_OFF;
1030 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
1031 POSTING_READ(dp_priv->output_reg);
1032}
1033
1034static void
1035intel_dp_restore(struct drm_connector *connector)
1036{
1037 struct intel_output *intel_output = to_intel_output(connector);
1038 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1039
1040 if (dp_priv->save_DP & DP_PORT_EN)
1041 intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
1042 else
1043 intel_dp_link_down(intel_output, dp_priv->save_DP);
1044}
1045
1046/*
1047 * According to DP spec
1048 * 5.1.2:
1049 * 1. Read DPCD
1050 * 2. Configure link according to Receiver Capabilities
1051 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
1052 * 4. Check link status on receipt of hot-plug interrupt
1053 */
1054
1055static void
1056intel_dp_check_link_status(struct intel_output *intel_output)
1057{
1058 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1059 uint8_t link_status[DP_LINK_STATUS_SIZE];
1060
1061 if (!intel_output->enc.crtc)
1062 return;
1063
1064 if (!intel_dp_get_link_status(intel_output, link_status)) {
1065 intel_dp_link_down(intel_output, dp_priv->DP);
1066 return;
1067 }
1068
1069 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
1070 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
1071}
1072
1073static enum drm_connector_status
1074igdng_dp_detect(struct drm_connector *connector)
1075{
1076 struct intel_output *intel_output = to_intel_output(connector);
1077 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1078 enum drm_connector_status status;
1079
1080 status = connector_status_disconnected;
1081 if (intel_dp_aux_native_read(intel_output,
1082 0x000, dp_priv->dpcd,
1083 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1084 {
1085 if (dp_priv->dpcd[0] != 0)
1086 status = connector_status_connected;
1087 }
1088 return status;
1089}
1090
1091/**
 1092 * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection.
1093 *
1094 * \return true if DP port is connected.
1095 * \return false if DP port is disconnected.
1096 */
1097static enum drm_connector_status
1098intel_dp_detect(struct drm_connector *connector)
1099{
1100 struct intel_output *intel_output = to_intel_output(connector);
1101 struct drm_device *dev = intel_output->base.dev;
1102 struct drm_i915_private *dev_priv = dev->dev_private;
1103 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1104 uint32_t temp, bit;
1105 enum drm_connector_status status;
1106
1107 dp_priv->has_audio = false;
1108
1109 if (IS_IGDNG(dev))
1110 return igdng_dp_detect(connector);
1111
1112 temp = I915_READ(PORT_HOTPLUG_EN);
1113
1114 I915_WRITE(PORT_HOTPLUG_EN,
1115 temp |
1116 DPB_HOTPLUG_INT_EN |
1117 DPC_HOTPLUG_INT_EN |
1118 DPD_HOTPLUG_INT_EN);
1119
1120 POSTING_READ(PORT_HOTPLUG_EN);
1121
1122 switch (dp_priv->output_reg) {
1123 case DP_B:
1124 bit = DPB_HOTPLUG_INT_STATUS;
1125 break;
1126 case DP_C:
1127 bit = DPC_HOTPLUG_INT_STATUS;
1128 break;
1129 case DP_D:
1130 bit = DPD_HOTPLUG_INT_STATUS;
1131 break;
1132 default:
1133 return connector_status_unknown;
1134 }
1135
1136 temp = I915_READ(PORT_HOTPLUG_STAT);
1137
1138 if ((temp & bit) == 0)
1139 return connector_status_disconnected;
1140
1141 status = connector_status_disconnected;
1142 if (intel_dp_aux_native_read(intel_output,
1143 0x000, dp_priv->dpcd,
1144 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1145 {
1146 if (dp_priv->dpcd[0] != 0)
1147 status = connector_status_connected;
1148 }
1149 return status;
1150}
1151
1152static int intel_dp_get_modes(struct drm_connector *connector)
1153{
1154 struct intel_output *intel_output = to_intel_output(connector);
1155 struct drm_device *dev = intel_output->base.dev;
1156 struct drm_i915_private *dev_priv = dev->dev_private;
1157 int ret;
1158
1159 /* We should parse the EDID data and find out if it has an audio sink
1160 */
1161
1162 ret = intel_ddc_get_modes(intel_output);
1163 if (ret)
1164 return ret;
1165
1166 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1167 if (IS_eDP(intel_output)) {
1168 if (dev_priv->panel_fixed_mode != NULL) {
1169 struct drm_display_mode *mode;
1170 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
1171 drm_mode_probed_add(connector, mode);
1172 return 1;
1173 }
1174 }
1175 return 0;
1176}
1177
1178static void
1179intel_dp_destroy (struct drm_connector *connector)
1180{
1181 struct intel_output *intel_output = to_intel_output(connector);
1182
1183 if (intel_output->i2c_bus)
1184 intel_i2c_destroy(intel_output->i2c_bus);
1185 drm_sysfs_connector_remove(connector);
1186 drm_connector_cleanup(connector);
1187 kfree(intel_output);
1188}
1189
1190static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1191 .dpms = intel_dp_dpms,
1192 .mode_fixup = intel_dp_mode_fixup,
1193 .prepare = intel_encoder_prepare,
1194 .mode_set = intel_dp_mode_set,
1195 .commit = intel_encoder_commit,
1196};
1197
1198static const struct drm_connector_funcs intel_dp_connector_funcs = {
1199 .dpms = drm_helper_connector_dpms,
1200 .save = intel_dp_save,
1201 .restore = intel_dp_restore,
1202 .detect = intel_dp_detect,
1203 .fill_modes = drm_helper_probe_single_connector_modes,
1204 .destroy = intel_dp_destroy,
1205};
1206
1207static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
1208 .get_modes = intel_dp_get_modes,
1209 .mode_valid = intel_dp_mode_valid,
1210 .best_encoder = intel_best_encoder,
1211};
1212
1213static void intel_dp_enc_destroy(struct drm_encoder *encoder)
1214{
1215 drm_encoder_cleanup(encoder);
1216}
1217
1218static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1219 .destroy = intel_dp_enc_destroy,
1220};
1221
1222void
1223intel_dp_hot_plug(struct intel_output *intel_output)
1224{
1225 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1226
1227 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1228 intel_dp_check_link_status(intel_output);
1229}
1230
1231void
1232intel_dp_init(struct drm_device *dev, int output_reg)
1233{
1234 struct drm_i915_private *dev_priv = dev->dev_private;
1235 struct drm_connector *connector;
1236 struct intel_output *intel_output;
1237 struct intel_dp_priv *dp_priv;
1238 const char *name = NULL;
1239
1240 intel_output = kcalloc(sizeof(struct intel_output) +
1241 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1242 if (!intel_output)
1243 return;
1244
1245 dp_priv = (struct intel_dp_priv *)(intel_output + 1);
1246
1247 connector = &intel_output->base;
1248 drm_connector_init(dev, connector, &intel_dp_connector_funcs,
1249 DRM_MODE_CONNECTOR_DisplayPort);
1250 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1251
1252 if (output_reg == DP_A)
1253 intel_output->type = INTEL_OUTPUT_EDP;
1254 else
1255 intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
1256
1257 connector->interlace_allowed = true;
1258 connector->doublescan_allowed = 0;
1259
1260 dp_priv->intel_output = intel_output;
1261 dp_priv->output_reg = output_reg;
1262 dp_priv->has_audio = false;
1263 dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
1264 intel_output->dev_priv = dp_priv;
1265
1266 drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
1267 DRM_MODE_ENCODER_TMDS);
1268 drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
1269
1270 drm_mode_connector_attach_encoder(&intel_output->base,
1271 &intel_output->enc);
1272 drm_sysfs_connector_add(connector);
1273
1274 /* Set up the DDC bus. */
1275 switch (output_reg) {
1276 case DP_A:
1277 name = "DPDDC-A";
1278 break;
1279 case DP_B:
1280 case PCH_DP_B:
1281 name = "DPDDC-B";
1282 break;
1283 case DP_C:
1284 case PCH_DP_C:
1285 name = "DPDDC-C";
1286 break;
1287 case DP_D:
1288 case PCH_DP_D:
1289 name = "DPDDC-D";
1290 break;
1291 }
1292
1293 intel_dp_i2c_init(intel_output, name);
1294
1295 intel_output->ddc_bus = &dp_priv->adapter;
1296 intel_output->hot_plug = intel_dp_hot_plug;
1297
1298 if (output_reg == DP_A) {
1299 /* initialize panel mode from VBT if available for eDP */
1300 if (dev_priv->lfp_lvds_vbt_mode) {
1301 dev_priv->panel_fixed_mode =
1302 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
1303 if (dev_priv->panel_fixed_mode) {
1304 dev_priv->panel_fixed_mode->type |=
1305 DRM_MODE_TYPE_PREFERRED;
1306 }
1307 }
1308 }
1309
1310 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
1311 * 0xd. Failure to do so will result in spurious interrupts being
1312 * generated on the port when a cable is not attached.
1313 */
1314 if (IS_G4X(dev) && !IS_GM45(dev)) {
1315 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
1316 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
1317 }
1318}
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 000000000000..2b38054d3b6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright © 2008 Keith Packard
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#ifndef _INTEL_DP_H_
24#define _INTEL_DP_H_
25
26/* From the VESA DisplayPort spec */
27
28#define AUX_NATIVE_WRITE 0x8
29#define AUX_NATIVE_READ 0x9
30#define AUX_I2C_WRITE 0x0
31#define AUX_I2C_READ 0x1
32#define AUX_I2C_STATUS 0x2
33#define AUX_I2C_MOT 0x4
34
35#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
36#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
37#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
38#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
39
40#define AUX_I2C_REPLY_ACK (0x0 << 6)
41#define AUX_I2C_REPLY_NACK (0x1 << 6)
42#define AUX_I2C_REPLY_DEFER (0x2 << 6)
43#define AUX_I2C_REPLY_MASK (0x3 << 6)
44
45/* AUX CH addresses */
46#define DP_LINK_BW_SET 0x100
47# define DP_LINK_BW_1_62 0x06
48# define DP_LINK_BW_2_7 0x0a
49
50#define DP_LANE_COUNT_SET 0x101
51# define DP_LANE_COUNT_MASK 0x0f
52# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
53
54#define DP_TRAINING_PATTERN_SET 0x102
55
56# define DP_TRAINING_PATTERN_DISABLE 0
57# define DP_TRAINING_PATTERN_1 1
58# define DP_TRAINING_PATTERN_2 2
59# define DP_TRAINING_PATTERN_MASK 0x3
60
61# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
62# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
63# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
64# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
65# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
66
67# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
68# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
69
70# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
71# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
72# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
73# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
74
75#define DP_TRAINING_LANE0_SET 0x103
76#define DP_TRAINING_LANE1_SET 0x104
77#define DP_TRAINING_LANE2_SET 0x105
78#define DP_TRAINING_LANE3_SET 0x106
79
80# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
81# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
82# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
83# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
84# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
85# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
86# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
87
88# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
89# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
90# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
91# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
92# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
93
94# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
95# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
96
97#define DP_DOWNSPREAD_CTRL 0x107
98# define DP_SPREAD_AMP_0_5 (1 << 4)
99
100#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
101# define DP_SET_ANSI_8B10B (1 << 0)
102
103#define DP_LANE0_1_STATUS 0x202
104#define DP_LANE2_3_STATUS 0x203
105
106# define DP_LANE_CR_DONE (1 << 0)
107# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
108# define DP_LANE_SYMBOL_LOCKED (1 << 2)
109
110#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
111
112#define DP_INTERLANE_ALIGN_DONE (1 << 0)
113#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
114#define DP_LINK_STATUS_UPDATED (1 << 7)
115
116#define DP_SINK_STATUS 0x205
117
118#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
119#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
120
121#define DP_ADJUST_REQUEST_LANE0_1 0x206
122#define DP_ADJUST_REQUEST_LANE2_3 0x207
123
124#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
125#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
126#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
127#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
128#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
129#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
130#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
131#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
132
133struct i2c_algo_dp_aux_data {
134 bool running;
135 u16 address;
136 int (*aux_ch) (struct i2c_adapter *adapter,
137 uint8_t *send, int send_bytes,
138 uint8_t *recv, int recv_bytes);
139};
140
141int
142i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
143
144#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
new file mode 100644
index 000000000000..a63b6f57d2d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_i2c.c
@@ -0,0 +1,273 @@
1/*
2 * Copyright © 2009 Keith Packard
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/sched.h>
30#include <linux/i2c.h>
31#include "intel_dp.h"
32#include "drmP.h"
33
34/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
35
36#define MODE_I2C_START 1
37#define MODE_I2C_WRITE 2
38#define MODE_I2C_READ 4
39#define MODE_I2C_STOP 8
40
41static int
42i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
43 uint8_t write_byte, uint8_t *read_byte)
44{
45 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
46 uint16_t address = algo_data->address;
47 uint8_t msg[5];
48 uint8_t reply[2];
49 int msg_bytes;
50 int reply_bytes;
51 int ret;
52
53 /* Set up the command byte */
54 if (mode & MODE_I2C_READ)
55 msg[0] = AUX_I2C_READ << 4;
56 else
57 msg[0] = AUX_I2C_WRITE << 4;
58
59 if (!(mode & MODE_I2C_STOP))
60 msg[0] |= AUX_I2C_MOT << 4;
61
62 msg[1] = address >> 8;
63 msg[2] = address;
64
65 switch (mode) {
66 case MODE_I2C_WRITE:
67 msg[3] = 0;
68 msg[4] = write_byte;
69 msg_bytes = 5;
70 reply_bytes = 1;
71 break;
72 case MODE_I2C_READ:
73 msg[3] = 0;
74 msg_bytes = 4;
75 reply_bytes = 2;
76 break;
77 default:
78 msg_bytes = 3;
79 reply_bytes = 1;
80 break;
81 }
82
83 for (;;) {
84 ret = (*algo_data->aux_ch)(adapter,
85 msg, msg_bytes,
86 reply, reply_bytes);
87 if (ret < 0) {
88 DRM_DEBUG("aux_ch failed %d\n", ret);
89 return ret;
90 }
91 switch (reply[0] & AUX_I2C_REPLY_MASK) {
92 case AUX_I2C_REPLY_ACK:
93 if (mode == MODE_I2C_READ) {
94 *read_byte = reply[1];
95 }
96 return reply_bytes - 1;
97 case AUX_I2C_REPLY_NACK:
98 DRM_DEBUG("aux_ch nack\n");
99 return -EREMOTEIO;
100 case AUX_I2C_REPLY_DEFER:
101 DRM_DEBUG("aux_ch defer\n");
102 udelay(100);
103 break;
104 default:
105 DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
106 return -EREMOTEIO;
107 }
108 }
109}
110
111/*
112 * I2C over AUX CH
113 */
114
115/*
116 * Send the address. If the I2C link is running, this 'restarts'
117 * the connection with the new address; this is used for doing
118 * a write followed by a read (as needed for DDC)
119 */
120static int
121i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
122{
123 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
124 int mode = MODE_I2C_START;
125 int ret;
126
127 if (reading)
128 mode |= MODE_I2C_READ;
129 else
130 mode |= MODE_I2C_WRITE;
131 algo_data->address = address;
132 algo_data->running = true;
133 ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
134 return ret;
135}
136
137/*
138 * Stop the I2C transaction. This closes out the link, sending
139 * a bare address packet with the MOT bit turned off
140 */
141static void
142i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
143{
144 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
145 int mode = MODE_I2C_STOP;
146
147 if (reading)
148 mode |= MODE_I2C_READ;
149 else
150 mode |= MODE_I2C_WRITE;
151 if (algo_data->running) {
152 (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
153 algo_data->running = false;
154 }
155}
156
157/*
158 * Write a single byte to the current I2C address; the
159 * I2C link must be running or this returns -EIO
160 */
161static int
162i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
163{
164 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
165 int ret;
166
167 if (!algo_data->running)
168 return -EIO;
169
170 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
171 return ret;
172}
173
174/*
175 * Read a single byte from the current I2C address; the
176 * I2C link must be running or this returns -EIO
177 */
178static int
179i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
180{
181 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
182 int ret;
183
184 if (!algo_data->running)
185 return -EIO;
186
187 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
188 return ret;
189}
190
191static int
192i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
193 struct i2c_msg *msgs,
194 int num)
195{
196 int ret = 0;
197 bool reading = false;
198 int m;
199 int b;
200
201 for (m = 0; m < num; m++) {
202 u16 len = msgs[m].len;
203 u8 *buf = msgs[m].buf;
204 reading = (msgs[m].flags & I2C_M_RD) != 0;
205 ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
206 if (ret < 0)
207 break;
208 if (reading) {
209 for (b = 0; b < len; b++) {
210 ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
211 if (ret < 0)
212 break;
213 }
214 } else {
215 for (b = 0; b < len; b++) {
216 ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
217 if (ret < 0)
218 break;
219 }
220 }
221 if (ret < 0)
222 break;
223 }
224 if (ret >= 0)
225 ret = num;
226 i2c_algo_dp_aux_stop(adapter, reading);
227 DRM_DEBUG("dp_aux_xfer return %d\n", ret);
228 return ret;
229}
230
231static u32
232i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
233{
234 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
235 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
236 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
237 I2C_FUNC_10BIT_ADDR;
238}
239
240static const struct i2c_algorithm i2c_dp_aux_algo = {
241 .master_xfer = i2c_algo_dp_aux_xfer,
242 .functionality = i2c_algo_dp_aux_functionality,
243};
244
245static void
246i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
247{
248 (void) i2c_algo_dp_aux_address(adapter, 0, false);
249 (void) i2c_algo_dp_aux_stop(adapter, false);
250
251}
252
253static int
254i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
255{
256 adapter->algo = &i2c_dp_aux_algo;
257 adapter->retries = 3;
258 i2c_dp_aux_reset_bus(adapter);
259 return 0;
260}
261
262int
263i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
264{
265 int error;
266
267 error = i2c_dp_aux_prepare_bus(adapter);
268 if (error)
269 return error;
270 error = i2c_add_adapter(adapter);
271 return error;
272}
273EXPORT_SYMBOL(i2c_dp_aux_add_bus);
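/*
 * Illustrative sketch (not part of the patch): how a display driver
 * might register an I2C-over-AUX bus with the helper above. The
 * my_dp_priv structure, my_hw_aux_ch() and my_dp_i2c_init() names are
 * hypothetical; a real caller supplies a hardware-specific aux_ch()
 * that moves the message over its AUX channel registers.
 */
#include <linux/module.h>
#include <linux/i2c.h>
#include "intel_dp.h"

struct my_dp_priv {
	struct i2c_adapter adapter;
	struct i2c_algo_dp_aux_data algo;
};

/* Placeholder transfer: a real implementation programs the AUX_CH
 * control/data registers and returns the number of bytes received. */
static int my_hw_aux_ch(struct i2c_adapter *adapter,
			uint8_t *send, int send_bytes,
			uint8_t *recv, int recv_bytes)
{
	return recv_bytes;
}

static int my_dp_i2c_init(struct my_dp_priv *priv, struct device *parent)
{
	priv->algo.running = false;
	priv->algo.address = 0;
	priv->algo.aux_ch = my_hw_aux_ch;

	priv->adapter.owner = THIS_MODULE;
	priv->adapter.class = I2C_CLASS_DDC;
	strlcpy(priv->adapter.name, "DP AUX channel",
		sizeof(priv->adapter.name));
	priv->adapter.algo_data = &priv->algo;
	priv->adapter.dev.parent = parent;

	/* After this, EDID can be fetched over the adapter, e.g. with
	 * drm_get_edid(connector, &priv->adapter). */
	return i2c_dp_aux_add_bus(&priv->adapter);
}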
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd4b9c5f715e..d6f92ea1b553 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -54,6 +54,8 @@
54#define INTEL_OUTPUT_LVDS 4 54#define INTEL_OUTPUT_LVDS 4
55#define INTEL_OUTPUT_TVOUT 5 55#define INTEL_OUTPUT_TVOUT 5
56#define INTEL_OUTPUT_HDMI 6 56#define INTEL_OUTPUT_HDMI 6
57#define INTEL_OUTPUT_DISPLAYPORT 7
58#define INTEL_OUTPUT_EDP 8
57 59
58#define INTEL_DVO_CHIP_NONE 0 60#define INTEL_DVO_CHIP_NONE 0
59#define INTEL_DVO_CHIP_LVDS 1 61#define INTEL_DVO_CHIP_LVDS 1
@@ -65,7 +67,6 @@ struct intel_i2c_chan {
65 u32 reg; /* GPIO reg */ 67 u32 reg; /* GPIO reg */
66 struct i2c_adapter adapter; 68 struct i2c_adapter adapter;
67 struct i2c_algo_bit_data algo; 69 struct i2c_algo_bit_data algo;
68 u8 slave_addr;
69}; 70};
70 71
71struct intel_framebuffer { 72struct intel_framebuffer {
@@ -79,11 +80,12 @@ struct intel_output {
79 80
80 struct drm_encoder enc; 81 struct drm_encoder enc;
81 int type; 82 int type;
82 struct intel_i2c_chan *i2c_bus; /* for control functions */ 83 struct i2c_adapter *i2c_bus;
83 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ 84 struct i2c_adapter *ddc_bus;
84 bool load_detect_temp; 85 bool load_detect_temp;
85 bool needs_tv_clock; 86 bool needs_tv_clock;
86 void *dev_priv; 87 void *dev_priv;
88 void (*hot_plug)(struct intel_output *);
87}; 89};
88 90
89struct intel_crtc { 91struct intel_crtc {
@@ -104,9 +106,9 @@ struct intel_crtc {
104#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) 106#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
105#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 107#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
106 108
107struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, 109struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
108 const char *name); 110 const char *name);
109void intel_i2c_destroy(struct intel_i2c_chan *chan); 111void intel_i2c_destroy(struct i2c_adapter *adapter);
110int intel_ddc_get_modes(struct intel_output *intel_output); 112int intel_ddc_get_modes(struct intel_output *intel_output);
111extern bool intel_ddc_probe(struct intel_output *intel_output); 113extern bool intel_ddc_probe(struct intel_output *intel_output);
112void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 114void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
@@ -116,6 +118,12 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
116extern void intel_dvo_init(struct drm_device *dev); 118extern void intel_dvo_init(struct drm_device *dev);
117extern void intel_tv_init(struct drm_device *dev); 119extern void intel_tv_init(struct drm_device *dev);
118extern void intel_lvds_init(struct drm_device *dev); 120extern void intel_lvds_init(struct drm_device *dev);
121extern void intel_dp_init(struct drm_device *dev, int dp_reg);
122void
123intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
124 struct drm_display_mode *adjusted_mode);
125extern void intel_edp_link_config (struct intel_output *, int *, int *);
126
119 127
120extern void intel_crtc_load_lut(struct drm_crtc *crtc); 128extern void intel_crtc_load_lut(struct drm_crtc *crtc);
121extern void intel_encoder_prepare (struct drm_encoder *encoder); 129extern void intel_encoder_prepare (struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1ee3007d6ec0..13bff20930e8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev)
384{ 384{
385 struct intel_output *intel_output; 385 struct intel_output *intel_output;
386 struct intel_dvo_device *dvo; 386 struct intel_dvo_device *dvo;
387 struct intel_i2c_chan *i2cbus = NULL; 387 struct i2c_adapter *i2cbus = NULL;
388 int ret = 0; 388 int ret = 0;
389 int i; 389 int i;
390 int gpio_inited = 0;
391 int encoder_type = DRM_MODE_ENCODER_NONE; 390 int encoder_type = DRM_MODE_ENCODER_NONE;
392 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); 391 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
393 if (!intel_output) 392 if (!intel_output)
@@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev)
420 * It appears that everything is on GPIOE except for panels 419 * It appears that everything is on GPIOE except for panels
421 * on i830 laptops, which are on GPIOB (DVOA). 420 * on i830 laptops, which are on GPIOB (DVOA).
422 */ 421 */
423 if (gpio_inited != gpio) { 422 if (i2cbus != NULL)
424 if (i2cbus != NULL) 423 intel_i2c_destroy(i2cbus);
425 intel_i2c_destroy(i2cbus); 424 if (!(i2cbus = intel_i2c_create(dev, gpio,
426 if (!(i2cbus = intel_i2c_create(dev, gpio, 425 gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
427 gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { 426 continue;
428 continue;
429 }
430 gpio_inited = gpio;
431 } 427 }
432 428
433 if (dvo->dev_ops!= NULL) 429 if (dvo->dev_ops!= NULL)
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 1af7d68e3807..1d30802e773e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -453,7 +453,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
453 size = ALIGN(size, PAGE_SIZE); 453 size = ALIGN(size, PAGE_SIZE);
454 fbo = drm_gem_object_alloc(dev, size); 454 fbo = drm_gem_object_alloc(dev, size);
455 if (!fbo) { 455 if (!fbo) {
456 printk(KERN_ERR "failed to allocate framebuffer\n"); 456 DRM_ERROR("failed to allocate framebuffer\n");
457 ret = -ENOMEM; 457 ret = -ENOMEM;
458 goto out; 458 goto out;
459 } 459 }
@@ -610,8 +610,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
610 par->dev = dev; 610 par->dev = dev;
611 611
612 /* To allow resizeing without swapping buffers */ 612 /* To allow resizeing without swapping buffers */
613 printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, 613 DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
614 intel_fb->base.height, obj_priv->gtt_offset, fbo); 614 intel_fb->base.height, obj_priv->gtt_offset, fbo);
615 615
616 mutex_unlock(&dev->struct_mutex); 616 mutex_unlock(&dev->struct_mutex);
617 return 0; 617 return 0;
@@ -698,13 +698,13 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *
698 } else 698 } else
699 intelfb_set_par(info); 699 intelfb_set_par(info);
700 700
701 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, 701 DRM_INFO("fb%d: %s frame buffer device\n", info->node,
702 info->fix.id); 702 info->fix.id);
703 703
704 /* Switch back to kernel console on panic */ 704 /* Switch back to kernel console on panic */
705 kernelfb_mode = *modeset; 705 kernelfb_mode = *modeset;
706 atomic_notifier_chain_register(&panic_notifier_list, &paniced); 706 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
707 printk(KERN_INFO "registered panic notifier\n"); 707 DRM_DEBUG("registered panic notifier\n");
708 708
709 return 0; 709 return 0;
710} 710}
@@ -852,13 +852,13 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
852 } else 852 } else
853 intelfb_set_par(info); 853 intelfb_set_par(info);
854 854
855 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, 855 DRM_INFO("fb%d: %s frame buffer device\n", info->node,
856 info->fix.id); 856 info->fix.id);
857 857
858 /* Switch back to kernel console on panic */ 858 /* Switch back to kernel console on panic */
859 kernelfb_mode = *modeset; 859 kernelfb_mode = *modeset;
860 atomic_notifier_chain_register(&panic_notifier_list, &paniced); 860 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
861 printk(KERN_INFO "registered panic notifier\n"); 861 DRM_DEBUG("registered panic notifier\n");
862 862
863 return 0; 863 return 0;
864} 864}
@@ -872,8 +872,8 @@ void intelfb_restore(void)
872{ 872{
873 int ret; 873 int ret;
874 if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) { 874 if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) {
875 printk(KERN_ERR "Failed to restore crtc configuration: %d\n", 875 DRM_ERROR("Failed to restore crtc configuration: %d\n",
876 ret); 876 ret);
877 } 877 }
878} 878}
879 879
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4ea2a651b92c..1842290cded3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
31#include "drmP.h" 31#include "drmP.h"
32#include "drm.h" 32#include "drm.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "drm_edid.h"
34#include "intel_drv.h" 35#include "intel_drv.h"
35#include "i915_drm.h" 36#include "i915_drm.h"
36#include "i915_drv.h" 37#include "i915_drv.h"
@@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
56 sdvox = SDVO_ENCODING_HDMI | 57 sdvox = SDVO_ENCODING_HDMI |
57 SDVO_BORDER_ENABLE | 58 SDVO_BORDER_ENABLE |
58 SDVO_VSYNC_ACTIVE_HIGH | 59 SDVO_VSYNC_ACTIVE_HIGH |
59 SDVO_HSYNC_ACTIVE_HIGH | 60 SDVO_HSYNC_ACTIVE_HIGH;
60 SDVO_NULL_PACKETS_DURING_VSYNC;
61 61
62 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink)
63 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
@@ -129,83 +129,28 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
129 return true; 129 return true;
130} 130}
131 131
132static void
133intel_hdmi_sink_detect(struct drm_connector *connector)
134{
135 struct intel_output *intel_output = to_intel_output(connector);
136 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
137 struct edid *edid = NULL;
138
139 edid = drm_get_edid(&intel_output->base,
140 &intel_output->ddc_bus->adapter);
141 if (edid != NULL) {
142 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
143 kfree(edid);
144 intel_output->base.display_info.raw_edid = NULL;
145 }
146}
147
148static enum drm_connector_status
149igdng_hdmi_detect(struct drm_connector *connector)
150{
151 struct intel_output *intel_output = to_intel_output(connector);
152 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
153
154 /* FIXME hotplug detect */
155
156 hdmi_priv->has_hdmi_sink = false;
157 intel_hdmi_sink_detect(connector);
158 if (hdmi_priv->has_hdmi_sink)
159 return connector_status_connected;
160 else
161 return connector_status_disconnected;
162}
163
164static enum drm_connector_status 132static enum drm_connector_status
165intel_hdmi_detect(struct drm_connector *connector) 133intel_hdmi_detect(struct drm_connector *connector)
166{ 134{
167 struct drm_device *dev = connector->dev;
168 struct drm_i915_private *dev_priv = dev->dev_private;
169 struct intel_output *intel_output = to_intel_output(connector); 135 struct intel_output *intel_output = to_intel_output(connector);
170 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 136 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
171 u32 temp, bit; 137 struct edid *edid = NULL;
172 138 enum drm_connector_status status = connector_status_disconnected;
173 if (IS_IGDNG(dev))
174 return igdng_hdmi_detect(connector);
175
176 temp = I915_READ(PORT_HOTPLUG_EN);
177
178 switch (hdmi_priv->sdvox_reg) {
179 case SDVOB:
180 temp |= HDMIB_HOTPLUG_INT_EN;
181 break;
182 case SDVOC:
183 temp |= HDMIC_HOTPLUG_INT_EN;
184 break;
185 default:
186 return connector_status_unknown;
187 }
188
189 I915_WRITE(PORT_HOTPLUG_EN, temp);
190 139
191 POSTING_READ(PORT_HOTPLUG_EN); 140 hdmi_priv->has_hdmi_sink = false;
141 edid = drm_get_edid(&intel_output->base,
142 intel_output->ddc_bus);
192 143
193 switch (hdmi_priv->sdvox_reg) { 144 if (edid) {
194 case SDVOB: 145 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
195 bit = HDMIB_HOTPLUG_INT_STATUS; 146 status = connector_status_connected;
196 break; 147 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
197 case SDVOC: 148 }
198 bit = HDMIC_HOTPLUG_INT_STATUS; 149 intel_output->base.display_info.raw_edid = NULL;
199 break; 150 kfree(edid);
200 default:
201 return connector_status_unknown;
202 } 151 }
203 152
204 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { 153 return status;
205 intel_hdmi_sink_detect(connector);
206 return connector_status_connected;
207 } else
208 return connector_status_disconnected;
209} 154}
210 155
211static int intel_hdmi_get_modes(struct drm_connector *connector) 156static int intel_hdmi_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index f7061f68d050..62b8bead7652 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,6 +124,7 @@ static void set_data(void *data, int state_high)
124 * @output: driver specific output device 124 * @output: driver specific output device
125 * @reg: GPIO reg to use 125 * @reg: GPIO reg to use
126 * @name: name for this bus 126 * @name: name for this bus
127 * @slave_addr: slave address (if fixed)
127 * 128 *
128 * Creates and registers a new i2c bus with the Linux i2c layer, for use 129 * Creates and registers a new i2c bus with the Linux i2c layer, for use
129 * in output probing and control (e.g. DDC or SDVO control functions). 130 * in output probing and control (e.g. DDC or SDVO control functions).
@@ -139,8 +140,8 @@ static void set_data(void *data, int state_high)
139 * %GPIOH 140 * %GPIOH
140 * see PRM for details on how these different busses are used. 141 * see PRM for details on how these different busses are used.
141 */ 142 */
142struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, 143struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
143 const char *name) 144 const char *name)
144{ 145{
145 struct intel_i2c_chan *chan; 146 struct intel_i2c_chan *chan;
146 147
@@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
174 intel_i2c_quirk_set(dev, false); 175 intel_i2c_quirk_set(dev, false);
175 udelay(20); 176 udelay(20);
176 177
177 return chan; 178 return &chan->adapter;
178 179
179out_free: 180out_free:
180 kfree(chan); 181 kfree(chan);
@@ -187,11 +188,16 @@ out_free:
187 * 188 *
188 * Unregister the adapter from the i2c layer, then free the structure. 189 * Unregister the adapter from the i2c layer, then free the structure.
189 */ 190 */
190void intel_i2c_destroy(struct intel_i2c_chan *chan) 191void intel_i2c_destroy(struct i2c_adapter *adapter)
191{ 192{
192 if (!chan) 193 struct intel_i2c_chan *chan;
194
195 if (!adapter)
193 return; 196 return;
194 197
198 chan = container_of(adapter,
199 struct intel_i2c_chan,
200 adapter);
195 i2c_del_adapter(&chan->adapter); 201 i2c_del_adapter(&chan->adapter);
196 kfree(chan); 202 kfree(chan);
197} 203}
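/*
 * Illustrative sketch (not part of the patch): the container_of pattern
 * used by intel_i2c_destroy() above to get back from the i2c_adapter that
 * callers now hold to the intel_i2c_chan it is embedded in. The simplified
 * macro, sample structs and values here are assumptions for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sample_adapter {
	int nr;
};

struct sample_chan {
	unsigned int reg;		/* e.g. a GPIO register offset */
	struct sample_adapter adapter;	/* embedded; handed out to callers */
};

int main(void)
{
	struct sample_chan chan = { .reg = 0x1234, .adapter = { .nr = 3 } };
	struct sample_adapter *adapter = &chan.adapter;	/* what callers see */

	/* Recover the containing structure from the embedded member */
	struct sample_chan *back =
		container_of(adapter, struct sample_chan, adapter);

	printf("reg = 0x%x, same object: %d\n", back->reg, back == &chan);
	return 0;
}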
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f073ed8432e8..3f445a80c552 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -36,9 +36,25 @@
36#include "intel_drv.h" 36#include "intel_drv.h"
37#include "i915_drm.h" 37#include "i915_drm.h"
38#include "i915_drv.h" 38#include "i915_drv.h"
39#include <linux/acpi.h>
39 40
40#define I915_LVDS "i915_lvds" 41#define I915_LVDS "i915_lvds"
41 42
43/*
44 * The following four scaling options are defined:
45 * #define DRM_MODE_SCALE_NON_GPU 0
46 * #define DRM_MODE_SCALE_FULLSCREEN 1
47 * #define DRM_MODE_SCALE_NO_SCALE 2
48 * #define DRM_MODE_SCALE_ASPECT 3
49 */
50
51/* Private structure for the integrated LVDS support */
52struct intel_lvds_priv {
53 int fitting_mode;
54 u32 pfit_control;
55 u32 pfit_pgm_ratios;
56};
57
42/** 58/**
43 * Sets the backlight level. 59 * Sets the backlight level.
44 * 60 *
@@ -213,26 +229,45 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
213 struct drm_display_mode *mode, 229 struct drm_display_mode *mode,
214 struct drm_display_mode *adjusted_mode) 230 struct drm_display_mode *adjusted_mode)
215{ 231{
232 /*
233 * Floating point operations are not supported, so PANEL_RATIO_FACTOR
234 * is defined to avoid floating point computation when calculating
235 * the panel ratio.
236 */
237#define PANEL_RATIO_FACTOR 8192
216 struct drm_device *dev = encoder->dev; 238 struct drm_device *dev = encoder->dev;
217 struct drm_i915_private *dev_priv = dev->dev_private; 239 struct drm_i915_private *dev_priv = dev->dev_private;
218 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 240 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
219 struct drm_encoder *tmp_encoder; 241 struct drm_encoder *tmp_encoder;
242 struct intel_output *intel_output = enc_to_intel_output(encoder);
243 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
244 u32 pfit_control = 0, pfit_pgm_ratios = 0;
245 int left_border = 0, right_border = 0, top_border = 0;
246 int bottom_border = 0;
247 bool border = 0;
248 int panel_ratio, desired_ratio, vert_scale, horiz_scale;
249 int horiz_ratio, vert_ratio;
250 u32 hsync_width, vsync_width;
251 u32 hblank_width, vblank_width;
252 u32 hsync_pos, vsync_pos;
220 253
221 /* Should never happen!! */ 254 /* Should never happen!! */
222 if (!IS_I965G(dev) && intel_crtc->pipe == 0) { 255 if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
223 printk(KERN_ERR "Can't support LVDS on pipe A\n"); 256 DRM_ERROR("Can't support LVDS on pipe A\n");
224 return false; 257 return false;
225 } 258 }
226 259
227 /* Should never happen!! */ 260 /* Should never happen!! */
228 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { 261 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
229 if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { 262 if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
230 printk(KERN_ERR "Can't enable LVDS and another " 263 DRM_ERROR("Can't enable LVDS and another "
231 "encoder on the same pipe\n"); 264 "encoder on the same pipe\n");
232 return false; 265 return false;
233 } 266 }
234 } 267 }
235 268 /* If we don't have a panel mode, there is nothing we can do */
269 if (dev_priv->panel_fixed_mode == NULL)
270 return true;
236 /* 271 /*
237 * If we have timings from the BIOS for the panel, put them in 272 * If we have timings from the BIOS for the panel, put them in
238 * to the adjusted mode. The CRTC will be set up for this mode, 273 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -256,6 +291,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
256 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); 291 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
257 } 292 }
258 293
294 /* Make sure pre-965s set dither correctly */
295 if (!IS_I965G(dev)) {
296 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
297 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
298 }
299
300 /* Native modes don't need fitting */
301 if (adjusted_mode->hdisplay == mode->hdisplay &&
302 adjusted_mode->vdisplay == mode->vdisplay) {
303 pfit_pgm_ratios = 0;
304 border = 0;
305 goto out;
306 }
307
308 /* 965+ wants fuzzy fitting */
309 if (IS_I965G(dev))
310 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
311 PFIT_FILTER_FUZZY;
312
313 hsync_width = adjusted_mode->crtc_hsync_end -
314 adjusted_mode->crtc_hsync_start;
315 vsync_width = adjusted_mode->crtc_vsync_end -
316 adjusted_mode->crtc_vsync_start;
317 hblank_width = adjusted_mode->crtc_hblank_end -
318 adjusted_mode->crtc_hblank_start;
319 vblank_width = adjusted_mode->crtc_vblank_end -
320 adjusted_mode->crtc_vblank_start;
321 /*
322 * Deal with panel fitting options. Figure out how to stretch the
323 * image based on its aspect ratio & the current panel fitting mode.
324 */
325 panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
326 adjusted_mode->vdisplay;
327 desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
328 mode->vdisplay;
329 /*
330 * Enable automatic panel scaling for non-native modes so that they fill
331 * the screen. Should be enabled before the pipe is enabled, according
332 * to register description and PRM.
333 * Change the value here to see the borders for debugging
334 */
335 I915_WRITE(BCLRPAT_A, 0);
336 I915_WRITE(BCLRPAT_B, 0);
337
338 switch (lvds_priv->fitting_mode) {
339 case DRM_MODE_SCALE_NO_SCALE:
340 /*
341 * For centered modes, we have to calculate border widths &
342 * heights and modify the values programmed into the CRTC.
343 */
344 left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
345 right_border = left_border;
346 if (mode->hdisplay & 1)
347 right_border++;
348 top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
349 bottom_border = top_border;
350 if (mode->vdisplay & 1)
351 bottom_border++;
352 /* Set active & border values */
353 adjusted_mode->crtc_hdisplay = mode->hdisplay;
354 /* Keep the border even */
355 if (right_border & 1)
356 right_border++;
357 /* use the border directly instead of border minus one */
358 adjusted_mode->crtc_hblank_start = mode->hdisplay +
359 right_border;
360 /* keep the blank width constant */
361 adjusted_mode->crtc_hblank_end =
362 adjusted_mode->crtc_hblank_start + hblank_width;
363 /* get the hsync pos relative to hblank start */
364 hsync_pos = (hblank_width - hsync_width) / 2;
365 /* keep the hsync pos even */
366 if (hsync_pos & 1)
367 hsync_pos++;
368 adjusted_mode->crtc_hsync_start =
369 adjusted_mode->crtc_hblank_start + hsync_pos;
370 /* keep the hsync width constant */
371 adjusted_mode->crtc_hsync_end =
372 adjusted_mode->crtc_hsync_start + hsync_width;
373 adjusted_mode->crtc_vdisplay = mode->vdisplay;
374 /* use the border instead of border minus one */
375 adjusted_mode->crtc_vblank_start = mode->vdisplay +
376 bottom_border;
377 /* keep the vblank width constant */
378 adjusted_mode->crtc_vblank_end =
379 adjusted_mode->crtc_vblank_start + vblank_width;
380 /* get the vsync start position relative to vblank start */
381 vsync_pos = (vblank_width - vsync_width) / 2;
382 adjusted_mode->crtc_vsync_start =
383 adjusted_mode->crtc_vblank_start + vsync_pos;
384 /* keep the vsync width constant */
385 adjusted_mode->crtc_vsync_end =
386 adjusted_mode->crtc_vsync_start + vsync_width;
387 border = 1;
388 break;
389 case DRM_MODE_SCALE_ASPECT:
390 /* Scale but preserve the aspect ratio */
391 pfit_control |= PFIT_ENABLE;
392 if (IS_I965G(dev)) {
393 /* 965+ is easy, it does everything in hw */
394 if (panel_ratio > desired_ratio)
395 pfit_control |= PFIT_SCALING_PILLAR;
396 else if (panel_ratio < desired_ratio)
397 pfit_control |= PFIT_SCALING_LETTER;
398 else
399 pfit_control |= PFIT_SCALING_AUTO;
400 } else {
401 /*
402 * For earlier chips we have to calculate the scaling
403 * ratio by hand and program it into the
404 * PFIT_PGM_RATIO register
405 */
406 u32 horiz_bits, vert_bits, bits = 12;
407 horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
408 adjusted_mode->hdisplay;
409 vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
410 adjusted_mode->vdisplay;
411 horiz_scale = adjusted_mode->hdisplay *
412 PANEL_RATIO_FACTOR / mode->hdisplay;
413 vert_scale = adjusted_mode->vdisplay *
414 PANEL_RATIO_FACTOR / mode->vdisplay;
415
416 /* retain aspect ratio */
417 if (panel_ratio > desired_ratio) { /* Pillar */
418 u32 scaled_width;
419 scaled_width = mode->hdisplay * vert_scale /
420 PANEL_RATIO_FACTOR;
421 horiz_ratio = vert_ratio;
422 pfit_control |= (VERT_AUTO_SCALE |
423 VERT_INTERP_BILINEAR |
424 HORIZ_INTERP_BILINEAR);
425 /* Pillar will have left/right borders */
426 left_border = (adjusted_mode->hdisplay -
427 scaled_width) / 2;
428 right_border = left_border;
429 if (mode->hdisplay & 1) /* odd resolutions */
430 right_border++;
431 /* keep the border even */
432 if (right_border & 1)
433 right_border++;
434 adjusted_mode->crtc_hdisplay = scaled_width;
435 /* use border instead of border minus one */
436 adjusted_mode->crtc_hblank_start =
437 scaled_width + right_border;
438 /* keep the hblank width constant */
439 adjusted_mode->crtc_hblank_end =
440 adjusted_mode->crtc_hblank_start +
441 hblank_width;
442 /*
443 * get the hsync start pos relative to
444 * hblank start
445 */
446 hsync_pos = (hblank_width - hsync_width) / 2;
447 /* keep the hsync_pos even */
448 if (hsync_pos & 1)
449 hsync_pos++;
450 adjusted_mode->crtc_hsync_start =
451 adjusted_mode->crtc_hblank_start +
452 hsync_pos;
453 /* keep the hsync width constant */
454 adjusted_mode->crtc_hsync_end =
455 adjusted_mode->crtc_hsync_start +
456 hsync_width;
457 border = 1;
458 } else if (panel_ratio < desired_ratio) { /* letter */
459 u32 scaled_height = mode->vdisplay *
460 horiz_scale / PANEL_RATIO_FACTOR;
461 vert_ratio = horiz_ratio;
462 pfit_control |= (HORIZ_AUTO_SCALE |
463 VERT_INTERP_BILINEAR |
464 HORIZ_INTERP_BILINEAR);
465 /* Letterbox will have top/bottom border */
466 top_border = (adjusted_mode->vdisplay -
467 scaled_height) / 2;
468 bottom_border = top_border;
469 if (mode->vdisplay & 1)
470 bottom_border++;
471 adjusted_mode->crtc_vdisplay = scaled_height;
472 /* use border instead of border minus one */
473 adjusted_mode->crtc_vblank_start =
474 scaled_height + bottom_border;
475 /* keep the vblank width constant */
476 adjusted_mode->crtc_vblank_end =
477 adjusted_mode->crtc_vblank_start +
478 vblank_width;
479 /*
480 * get the vsync start pos relative to
481 * vblank start
482 */
483 vsync_pos = (vblank_width - vsync_width) / 2;
484 adjusted_mode->crtc_vsync_start =
485 adjusted_mode->crtc_vblank_start +
486 vsync_pos;
487 /* keep the vsync width constant */
488 adjusted_mode->crtc_vsync_end =
489 adjusted_mode->crtc_vsync_start +
490 vsync_width;
491 border = 1;
492 } else {
493 /* Aspect ratios match; let hw scale both directions */
494 pfit_control |= (VERT_AUTO_SCALE |
495 HORIZ_AUTO_SCALE |
496 VERT_INTERP_BILINEAR |
497 HORIZ_INTERP_BILINEAR);
498 }
499 horiz_bits = (1 << bits) * horiz_ratio /
500 PANEL_RATIO_FACTOR;
501 vert_bits = (1 << bits) * vert_ratio /
502 PANEL_RATIO_FACTOR;
503 pfit_pgm_ratios =
504 ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
505 PFIT_VERT_SCALE_MASK) |
506 ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
507 PFIT_HORIZ_SCALE_MASK);
508 }
509 break;
510
511 case DRM_MODE_SCALE_FULLSCREEN:
512 /*
513 * Full scaling, even if it changes the aspect ratio.
514 * Fortunately this is all done for us in hw.
515 */
516 pfit_control |= PFIT_ENABLE;
517 if (IS_I965G(dev))
518 pfit_control |= PFIT_SCALING_AUTO;
519 else
520 pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
521 VERT_INTERP_BILINEAR |
522 HORIZ_INTERP_BILINEAR);
523 break;
524 default:
525 break;
526 }
527
528out:
529 lvds_priv->pfit_control = pfit_control;
530 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
259 /* 531 /*
260 * XXX: It would be nice to support lower refresh rates on the 532 * XXX: It would be nice to support lower refresh rates on the
261 * panels to reduce power consumption, and perhaps match the 533 * panels to reduce power consumption, and perhaps match the
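/*
 * Illustrative sketch (not part of the patch): the fixed-point aspect
 * ratio comparison used in the panel fitter code above, reduced to a
 * standalone calculation. The 1280x800 panel and 1024x768 mode are
 * hypothetical inputs chosen only to show the pillarbox case.
 */
#include <stdio.h>

#define PANEL_RATIO_FACTOR 8192

int main(void)
{
	int panel_w = 1280, panel_h = 800;	/* native (adjusted) mode */
	int mode_w = 1024, mode_h = 768;	/* requested mode */

	/* Scaling by 8192 keeps the comparison in integer arithmetic */
	int panel_ratio = panel_w * PANEL_RATIO_FACTOR / panel_h;
	int desired_ratio = mode_w * PANEL_RATIO_FACTOR / mode_h;

	if (panel_ratio > desired_ratio)
		printf("panel is wider: pillarbox (left/right borders)\n");
	else if (panel_ratio < desired_ratio)
		printf("panel is narrower: letterbox (top/bottom borders)\n");
	else
		printf("aspect ratios match: scale in both directions\n");
	return 0;
}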
@@ -301,8 +573,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
301{ 573{
302 struct drm_device *dev = encoder->dev; 574 struct drm_device *dev = encoder->dev;
303 struct drm_i915_private *dev_priv = dev->dev_private; 575 struct drm_i915_private *dev_priv = dev->dev_private;
304 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 576 struct intel_output *intel_output = enc_to_intel_output(encoder);
305 u32 pfit_control; 577 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
306 578
307 /* 579 /*
308 * The LVDS pin pair will already have been turned on in the 580 * The LVDS pin pair will already have been turned on in the
@@ -319,22 +591,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
319 * screen. Should be enabled before the pipe is enabled, according to 591 * screen. Should be enabled before the pipe is enabled, according to
320 * register description and PRM. 592 * register description and PRM.
321 */ 593 */
322 if (mode->hdisplay != adjusted_mode->hdisplay || 594 I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
323 mode->vdisplay != adjusted_mode->vdisplay) 595 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
324 pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
325 HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
326 HORIZ_INTERP_BILINEAR);
327 else
328 pfit_control = 0;
329
330 if (!IS_I965G(dev)) {
331 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
332 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
333 }
334 else
335 pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
336
337 I915_WRITE(PFIT_CONTROL, pfit_control);
338} 596}
339 597
340/** 598/**
@@ -406,6 +664,34 @@ static int intel_lvds_set_property(struct drm_connector *connector,
406 struct drm_property *property, 664 struct drm_property *property,
407 uint64_t value) 665 uint64_t value)
408{ 666{
667 struct drm_device *dev = connector->dev;
668 struct intel_output *intel_output =
669 to_intel_output(connector);
670
671 if (property == dev->mode_config.scaling_mode_property &&
672 connector->encoder) {
673 struct drm_crtc *crtc = connector->encoder->crtc;
674 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
675 if (value == DRM_MODE_SCALE_NON_GPU) {
676 DRM_DEBUG_KMS(I915_LVDS,
677 "non_GPU property is unsupported\n");
678 return 0;
679 }
680 if (lvds_priv->fitting_mode == value) {
681 /* the LVDS scaling property is not changed */
682 return 0;
683 }
684 lvds_priv->fitting_mode = value;
685 if (crtc && crtc->enabled) {
686 /*
687 * If the CRTC is enabled, the display will be changed
688 * according to the new panel fitting mode.
689 */
690 drm_crtc_helper_set_mode(crtc, &crtc->mode,
691 crtc->x, crtc->y, crtc->fb);
692 }
693 }
694
409 return 0; 695 return 0;
410} 696}
411 697
@@ -456,7 +742,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
456 .callback = intel_no_lvds_dmi_callback, 742 .callback = intel_no_lvds_dmi_callback,
457 .ident = "Apple Mac Mini (Core series)", 743 .ident = "Apple Mac Mini (Core series)",
458 .matches = { 744 .matches = {
459 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 745 DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
460 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), 746 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
461 }, 747 },
462 }, 748 },
@@ -464,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
464 .callback = intel_no_lvds_dmi_callback, 750 .callback = intel_no_lvds_dmi_callback,
465 .ident = "Apple Mac Mini (Core 2 series)", 751 .ident = "Apple Mac Mini (Core 2 series)",
466 .matches = { 752 .matches = {
467 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 753 DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
468 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), 754 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
469 }, 755 },
470 }, 756 },
@@ -494,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
494 }, 780 },
495 { 781 {
496 .callback = intel_no_lvds_dmi_callback, 782 .callback = intel_no_lvds_dmi_callback,
783 .ident = "AOpen Mini PC MP915",
784 .matches = {
785 DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
786 DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
787 },
788 },
789 {
790 .callback = intel_no_lvds_dmi_callback,
497 .ident = "Aopen i945GTt-VFA", 791 .ident = "Aopen i945GTt-VFA",
498 .matches = { 792 .matches = {
499 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), 793 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
@@ -503,6 +797,65 @@ static const struct dmi_system_id intel_no_lvds[] = {
503 { } /* terminating entry */ 797 { } /* terminating entry */
504}; 798};
505 799
800#ifdef CONFIG_ACPI
801/*
802 * check_lid_device -- check whether @handle is an ACPI LID device.
803 * @handle: ACPI device handle
804 * @level: depth in the ACPI namespace tree
805 * @context: pointer to a flag that is set when a LID device is found
806 * @return_value: a return value to fill if desired (not used)
807 */
808static acpi_status
809check_lid_device(acpi_handle handle, u32 level, void *context,
810 void **return_value)
811{
812 struct acpi_device *acpi_dev;
813 int *lid_present = context;
814
815 acpi_dev = NULL;
816 /* Get the acpi device for device handle */
817 if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
818 /* If there is no ACPI device for handle, return */
819 return AE_OK;
820 }
821
822 if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
823 *lid_present = 1;
824
825 return AE_OK;
826}
827
828/**
829 * Check whether an ACPI LID device exists by enumerating the ACPI
830 * device tree.
831 */
832static int intel_lid_present(void)
833{
834 int lid_present = 0;
835
836 if (acpi_disabled) {
837 /* If ACPI is disabled, there is no ACPI device tree to
838 * check, so assume the LID device would have been present.
839 */
840 return 1;
841 }
842
843 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
844 ACPI_UINT32_MAX,
845 check_lid_device, &lid_present, NULL);
846
847 return lid_present;
848}
849#else
850static int intel_lid_present(void)
851{
852 /* In the absence of ACPI built in, assume that the LID device would
853 * have been present.
854 */
855 return 1;
856}
857#endif
858
506/** 859/**
507 * intel_lvds_init - setup LVDS connectors on this device 860 * intel_lvds_init - setup LVDS connectors on this device
508 * @dev: drm device 861 * @dev: drm device
@@ -518,6 +871,7 @@ void intel_lvds_init(struct drm_device *dev)
518 struct drm_encoder *encoder; 871 struct drm_encoder *encoder;
519 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 872 struct drm_display_mode *scan; /* *modes, *bios_mode; */
520 struct drm_crtc *crtc; 873 struct drm_crtc *crtc;
874 struct intel_lvds_priv *lvds_priv;
521 u32 lvds; 875 u32 lvds;
522 int pipe, gpio = GPIOC; 876 int pipe, gpio = GPIOC;
523 877
@@ -525,13 +879,28 @@ void intel_lvds_init(struct drm_device *dev)
525 if (dmi_check_system(intel_no_lvds)) 879 if (dmi_check_system(intel_no_lvds))
526 return; 880 return;
527 881
882 /* Assume that any device without an ACPI LID device also doesn't
883 * have an integrated LVDS. We would be better off parsing the BIOS
884 * to get a reliable indicator, but that code isn't written yet.
885 *
886 * In the case of all-in-one desktops using LVDS that we've seen,
887 * they're using SDVO LVDS.
888 */
889 if (!intel_lid_present())
890 return;
891
528 if (IS_IGDNG(dev)) { 892 if (IS_IGDNG(dev)) {
529 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 893 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
530 return; 894 return;
895 if (dev_priv->edp_support) {
896 DRM_DEBUG("disable LVDS for eDP support\n");
897 return;
898 }
531 gpio = PCH_GPIOC; 899 gpio = PCH_GPIOC;
532 } 900 }
533 901
534 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 902 intel_output = kzalloc(sizeof(struct intel_output) +
903 sizeof(struct intel_lvds_priv), GFP_KERNEL);
535 if (!intel_output) { 904 if (!intel_output) {
536 return; 905 return;
537 } 906 }
@@ -553,7 +922,18 @@ void intel_lvds_init(struct drm_device *dev)
553 connector->interlace_allowed = false; 922 connector->interlace_allowed = false;
554 connector->doublescan_allowed = false; 923 connector->doublescan_allowed = false;
555 924
925 lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
926 intel_output->dev_priv = lvds_priv;
927 /* create the scaling mode property */
928 drm_mode_create_scaling_mode_property(dev);
929 /*
930 * the initial panel fitting mode will be FULL_SCREEN.
931 */
556 932
933 drm_connector_attach_property(&intel_output->base,
934 dev->mode_config.scaling_mode_property,
935 DRM_MODE_SCALE_FULLSCREEN);
936 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
557 /* 937 /*
558 * LVDS discovery: 938 * LVDS discovery:
559 * 1) check for EDID on DDC 939 * 1) check for EDID on DDC
@@ -649,5 +1029,5 @@ failed:
649 if (intel_output->ddc_bus) 1029 if (intel_output->ddc_bus)
650 intel_i2c_destroy(intel_output->ddc_bus); 1030 intel_i2c_destroy(intel_output->ddc_bus);
651 drm_connector_cleanup(connector); 1031 drm_connector_cleanup(connector);
652 kfree(connector); 1032 kfree(intel_output);
653} 1033}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index e0910fefce87..67e2f4632a24 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
53 } 53 }
54 }; 54 };
55 55
56 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); 56 intel_i2c_quirk_set(intel_output->base.dev, true);
57 ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); 57 ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
58 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); 58 intel_i2c_quirk_set(intel_output->base.dev, false);
59
60 if (ret == 2) 59 if (ret == 2)
61 return true; 60 return true;
62 61
@@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
74 struct edid *edid; 73 struct edid *edid;
75 int ret = 0; 74 int ret = 0;
76 75
77 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); 76 intel_i2c_quirk_set(intel_output->base.dev, true);
78 edid = drm_get_edid(&intel_output->base, 77 edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
79 &intel_output->ddc_bus->adapter); 78 intel_i2c_quirk_set(intel_output->base.dev, false);
80 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
81 if (edid) { 79 if (edid) {
82 drm_mode_connector_update_edid_property(&intel_output->base, 80 drm_mode_connector_update_edid_property(&intel_output->base,
83 edid); 81 edid);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9a00adb3a508..5371d9332554 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -31,6 +31,7 @@
31#include "drm.h" 31#include "drm.h"
32#include "drm_crtc.h" 32#include "drm_crtc.h"
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "drm_edid.h"
34#include "i915_drm.h" 35#include "i915_drm.h"
35#include "i915_drv.h" 36#include "i915_drv.h"
36#include "intel_sdvo_regs.h" 37#include "intel_sdvo_regs.h"
@@ -38,8 +39,7 @@
38#undef SDVO_DEBUG 39#undef SDVO_DEBUG
39#define I915_SDVO "i915_sdvo" 40#define I915_SDVO "i915_sdvo"
40struct intel_sdvo_priv { 41struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus; 42 u8 slave_addr;
42 int slaveaddr;
43 43
44 /* Register for the SDVO device: SDVOB or SDVOC */ 44 /* Register for the SDVO device: SDVOB or SDVOC */
45 int output_device; 45 int output_device;
@@ -56,6 +56,12 @@ struct intel_sdvo_priv {
56 /* Pixel clock limitations reported by the SDVO device, in kHz */ 56 /* Pixel clock limitations reported by the SDVO device, in kHz */
57 int pixel_clock_min, pixel_clock_max; 57 int pixel_clock_min, pixel_clock_max;
58 58
59 /*
60 * For a multifunction SDVO device,
61 * this records the currently attached outputs.
62 */
63 uint16_t attached_output;
64
59 /** 65 /**
60 * This is set if we're going to treat the device as TV-out. 66 * This is set if we're going to treat the device as TV-out.
61 * 67 *
@@ -69,12 +75,23 @@ struct intel_sdvo_priv {
69 * This is set if we treat the device as HDMI, instead of DVI. 75 * This is set if we treat the device as HDMI, instead of DVI.
70 */ 76 */
71 bool is_hdmi; 77 bool is_hdmi;
78
72 /** 79 /**
73 * This is set if we detect output of sdvo device as LVDS. 80 * This is set if we detect output of sdvo device as LVDS.
74 */ 81 */
75 bool is_lvds; 82 bool is_lvds;
76 83
77 /** 84 /**
85 * This is sdvo flags for input timing.
86 */
87 uint8_t sdvo_flags;
88
89 /**
90 * This is the sdvo fixed panel mode pointer
91 */
92 struct drm_display_mode *sdvo_lvds_fixed_mode;
93
94 /**
78 * Returned SDTV resolutions allowed for the current format, if the 95 * Returned SDTV resolutions allowed for the current format, if the
79 * device reported it. 96 * device reported it.
80 */ 97 */
@@ -104,6 +121,9 @@ struct intel_sdvo_priv {
104 u32 save_SDVOX; 121 u32 save_SDVOX;
105}; 122};
106 123
124static bool
125intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags);
126
107/** 127/**
108 * Writes the SDVOB or SDVOC with the given value, but always writes both 128 * Writes the SDVOB or SDVOC with the given value, but always writes both
109 * SDVOB and SDVOC to work around apparent hardware issues (according to 129 * SDVOB and SDVOC to work around apparent hardware issues (according to
@@ -146,13 +166,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
146 166
147 struct i2c_msg msgs[] = { 167 struct i2c_msg msgs[] = {
148 { 168 {
149 .addr = sdvo_priv->i2c_bus->slave_addr, 169 .addr = sdvo_priv->slave_addr >> 1,
150 .flags = 0, 170 .flags = 0,
151 .len = 1, 171 .len = 1,
152 .buf = out_buf, 172 .buf = out_buf,
153 }, 173 },
154 { 174 {
155 .addr = sdvo_priv->i2c_bus->slave_addr, 175 .addr = sdvo_priv->slave_addr >> 1,
156 .flags = I2C_M_RD, 176 .flags = I2C_M_RD,
157 .len = 1, 177 .len = 1,
158 .buf = buf, 178 .buf = buf,
@@ -162,7 +182,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
162 out_buf[0] = addr; 182 out_buf[0] = addr;
163 out_buf[1] = 0; 183 out_buf[1] = 0;
164 184
165 if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) 185 if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
166 { 186 {
167 *ch = buf[0]; 187 *ch = buf[0];
168 return true; 188 return true;
@@ -175,10 +195,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
175static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, 195static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
176 u8 ch) 196 u8 ch)
177{ 197{
198 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
178 u8 out_buf[2]; 199 u8 out_buf[2];
179 struct i2c_msg msgs[] = { 200 struct i2c_msg msgs[] = {
180 { 201 {
181 .addr = intel_output->i2c_bus->slave_addr, 202 .addr = sdvo_priv->slave_addr >> 1,
182 .flags = 0, 203 .flags = 0,
183 .len = 2, 204 .len = 2,
184 .buf = out_buf, 205 .buf = out_buf,
@@ -188,7 +209,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
188 out_buf[0] = addr; 209 out_buf[0] = addr;
189 out_buf[1] = ch; 210 out_buf[1] = ch;
190 211
191 if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) 212 if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
192 { 213 {
193 return true; 214 return true;
194 } 215 }
@@ -592,6 +613,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
592 uint16_t height) 613 uint16_t height)
593{ 614{
594 struct intel_sdvo_preferred_input_timing_args args; 615 struct intel_sdvo_preferred_input_timing_args args;
616 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
595 uint8_t status; 617 uint8_t status;
596 618
597 memset(&args, 0, sizeof(args)); 619 memset(&args, 0, sizeof(args));
@@ -599,7 +621,12 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
599 args.width = width; 621 args.width = width;
600 args.height = height; 622 args.height = height;
601 args.interlace = 0; 623 args.interlace = 0;
602 args.scaled = 0; 624
625 if (sdvo_priv->is_lvds &&
626 (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
627 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
628 args.scaled = 1;
629
603 intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, 630 intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
604 &args, sizeof(args)); 631 &args, sizeof(args));
605 status = intel_sdvo_read_response(output, NULL, 0); 632 status = intel_sdvo_read_response(output, NULL, 0);
@@ -944,12 +971,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
944 struct intel_output *output = enc_to_intel_output(encoder); 971 struct intel_output *output = enc_to_intel_output(encoder);
945 struct intel_sdvo_priv *dev_priv = output->dev_priv; 972 struct intel_sdvo_priv *dev_priv = output->dev_priv;
946 973
947 if (!dev_priv->is_tv) { 974 if (dev_priv->is_tv) {
948 /* Make the CRTC code factor in the SDVO pixel multiplier. The
949 * SDVO device will be told of the multiplier during mode_set.
950 */
951 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
952 } else {
953 struct intel_sdvo_dtd output_dtd; 975 struct intel_sdvo_dtd output_dtd;
954 bool success; 976 bool success;
955 977
@@ -980,6 +1002,47 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
980 intel_sdvo_get_preferred_input_timing(output, 1002 intel_sdvo_get_preferred_input_timing(output,
981 &input_dtd); 1003 &input_dtd);
982 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1004 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1005 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
1006
1007 drm_mode_set_crtcinfo(adjusted_mode, 0);
1008
1009 mode->clock = adjusted_mode->clock;
1010
1011 adjusted_mode->clock *=
1012 intel_sdvo_get_pixel_multiplier(mode);
1013 } else {
1014 return false;
1015 }
1016 } else if (dev_priv->is_lvds) {
1017 struct intel_sdvo_dtd output_dtd;
1018 bool success;
1019
1020 drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
1021 /* Set output timings */
1022 intel_sdvo_get_dtd_from_mode(&output_dtd,
1023 dev_priv->sdvo_lvds_fixed_mode);
1024
1025 intel_sdvo_set_target_output(output,
1026 dev_priv->controlled_output);
1027 intel_sdvo_set_output_timing(output, &output_dtd);
1028
1029 /* Set the input timing to the screen. Assume always input 0. */
1030 intel_sdvo_set_target_input(output, true, false);
1031
1032
1033 success = intel_sdvo_create_preferred_input_timing(
1034 output,
1035 mode->clock / 10,
1036 mode->hdisplay,
1037 mode->vdisplay);
1038
1039 if (success) {
1040 struct intel_sdvo_dtd input_dtd;
1041
1042 intel_sdvo_get_preferred_input_timing(output,
1043 &input_dtd);
1044 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1045 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
983 1046
984 drm_mode_set_crtcinfo(adjusted_mode, 0); 1047 drm_mode_set_crtcinfo(adjusted_mode, 0);
985 1048
@@ -990,6 +1053,12 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
990 } else { 1053 } else {
991 return false; 1054 return false;
992 } 1055 }
1056
1057 } else {
1058 /* Make the CRTC code factor in the SDVO pixel multiplier. The
1059 * SDVO device will be told of the multiplier during mode_set.
1060 */
1061 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
993 } 1062 }
994 return true; 1063 return true;
995} 1064}
@@ -1033,15 +1102,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1033 1102
1034 /* We have tried to get input timing in mode_fixup, and filled into 1103 /* We have tried to get input timing in mode_fixup, and filled into
1035 adjusted_mode */ 1104 adjusted_mode */
1036 if (sdvo_priv->is_tv) 1105 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
1037 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1106 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1038 else 1107 input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
1108 } else
1039 intel_sdvo_get_dtd_from_mode(&input_dtd, mode); 1109 intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
1040 1110
1041 /* If it's a TV, we already set the output timing in mode_fixup. 1111 /* If it's a TV, we already set the output timing in mode_fixup.
1042 * Otherwise, the output timing is equal to the input timing. 1112 * Otherwise, the output timing is equal to the input timing.
1043 */ 1113 */
1044 if (!sdvo_priv->is_tv) { 1114 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
1045 /* Set the output timing to the screen */ 1115 /* Set the output timing to the screen */
1046 intel_sdvo_set_target_output(output, 1116 intel_sdvo_set_target_output(output,
1047 sdvo_priv->controlled_output); 1117 sdvo_priv->controlled_output);
@@ -1116,6 +1186,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1116 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1186 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
1117 } 1187 }
1118 1188
1189 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
1190 sdvox |= SDVO_STALL_SELECT;
1119 intel_sdvo_write_sdvox(output, sdvox); 1191 intel_sdvo_write_sdvox(output, sdvox);
1120} 1192}
1121 1193
@@ -1276,6 +1348,17 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
1276 if (sdvo_priv->pixel_clock_max < mode->clock) 1348 if (sdvo_priv->pixel_clock_max < mode->clock)
1277 return MODE_CLOCK_HIGH; 1349 return MODE_CLOCK_HIGH;
1278 1350
1351 if (sdvo_priv->is_lvds == true) {
1352 if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
1353 return MODE_PANEL;
1354
1355 if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
1356 return MODE_PANEL;
1357
1358 if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
1359 return MODE_PANEL;
1360 }
1361
1279 return MODE_OK; 1362 return MODE_OK;
1280} 1363}
1281 1364
@@ -1362,42 +1445,96 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1362 intel_sdvo_read_response(intel_output, &response, 2); 1445 intel_sdvo_read_response(intel_output, &response, 2);
1363} 1446}
1364 1447
1365static void 1448static bool
1366intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) 1449intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
1450{
1451 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1452 int caps = 0;
1453
1454 if (sdvo_priv->caps.output_flags &
1455 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
1456 caps++;
1457 if (sdvo_priv->caps.output_flags &
1458 (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
1459 caps++;
1460 if (sdvo_priv->caps.output_flags &
1461 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
1462 caps++;
1463 if (sdvo_priv->caps.output_flags &
1464 (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
1465 caps++;
1466 if (sdvo_priv->caps.output_flags &
1467 (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
1468 caps++;
1469
1470 if (sdvo_priv->caps.output_flags &
1471 (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
1472 caps++;
1473
1474 if (sdvo_priv->caps.output_flags &
1475 (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
1476 caps++;
1477
1478 return (caps > 1);
1479}
1480
1481enum drm_connector_status
1482intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1367{ 1483{
1368 struct intel_output *intel_output = to_intel_output(connector); 1484 struct intel_output *intel_output = to_intel_output(connector);
1369 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1485 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1486 enum drm_connector_status status = connector_status_connected;
1370 struct edid *edid = NULL; 1487 struct edid *edid = NULL;
1371 1488
1372 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1373 edid = drm_get_edid(&intel_output->base, 1489 edid = drm_get_edid(&intel_output->base,
1374 &intel_output->ddc_bus->adapter); 1490 intel_output->ddc_bus);
1375 if (edid != NULL) { 1491 if (edid != NULL) {
1376 sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); 1492 /* Don't report the output as connected if it's a DVI-I
1493 * connector with a non-digital EDID coming out.
1494 */
1495 if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
1496 if (edid->input & DRM_EDID_INPUT_DIGITAL)
1497 sdvo_priv->is_hdmi =
1498 drm_detect_hdmi_monitor(edid);
1499 else
1500 status = connector_status_disconnected;
1501 }
1502
1377 kfree(edid); 1503 kfree(edid);
1378 intel_output->base.display_info.raw_edid = NULL; 1504 intel_output->base.display_info.raw_edid = NULL;
1379 } 1505
1506 } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
1507 status = connector_status_disconnected;
1508
1509 return status;
1380} 1510}
1381 1511
1382static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1512static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
1383{ 1513{
1384 u8 response[2]; 1514 uint16_t response;
1385 u8 status; 1515 u8 status;
1386 struct intel_output *intel_output = to_intel_output(connector); 1516 struct intel_output *intel_output = to_intel_output(connector);
1517 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1387 1518
1388 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); 1519 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
1389 status = intel_sdvo_read_response(intel_output, &response, 2); 1520 status = intel_sdvo_read_response(intel_output, &response, 2);
1390 1521
1391 DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); 1522 DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8);
1392 1523
1393 if (status != SDVO_CMD_STATUS_SUCCESS) 1524 if (status != SDVO_CMD_STATUS_SUCCESS)
1394 return connector_status_unknown; 1525 return connector_status_unknown;
1395 1526
1396 if ((response[0] != 0) || (response[1] != 0)) { 1527 if (response == 0)
1397 intel_sdvo_hdmi_sink_detect(connector);
1398 return connector_status_connected;
1399 } else
1400 return connector_status_disconnected; 1528 return connector_status_disconnected;
1529
1530 if (intel_sdvo_multifunc_encoder(intel_output) &&
1531 sdvo_priv->attached_output != response) {
1532 if (sdvo_priv->controlled_output != response &&
1533 intel_sdvo_output_setup(intel_output, response) != true)
1534 return connector_status_unknown;
1535 sdvo_priv->attached_output = response;
1536 }
1537 return intel_sdvo_hdmi_sink_detect(connector, response);
1401} 1538}
1402 1539
1403static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1540static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
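
intel_sdvo_multifunc_encoder() above counts how many output classes (TMDS, RGB, S-Video, CVBS, YPrPb, SCART, LVDS) the encoder advertises and treats more than one as multifunction. The same rule can be written as a loop over a table of class masks, which avoids repeating each flag pair by hand. A hedged sketch with placeholder bit values (the real SDVO_OUTPUT_* bits live in intel_sdvo_regs.h):

/* Hedged sketch: table-driven "more than one output class" test.
 * The bit positions below are placeholders, not the real SDVO defines. */
#include <stdbool.h>
#include <stdint.h>

#define OUT_TMDS0  (1 << 0)
#define OUT_TMDS1  (1 << 1)
#define OUT_RGB0   (1 << 2)
#define OUT_RGB1   (1 << 3)
#define OUT_SVID0  (1 << 4)
#define OUT_SVID1  (1 << 5)
#define OUT_LVDS0  (1 << 6)
#define OUT_LVDS1  (1 << 7)

static const uint16_t output_classes[] = {
	OUT_TMDS0 | OUT_TMDS1,
	OUT_RGB0  | OUT_RGB1,
	OUT_SVID0 | OUT_SVID1,
	OUT_LVDS0 | OUT_LVDS1,
};

static bool is_multifunc(uint16_t output_flags)
{
	int i, classes = 0;

	for (i = 0; i < (int)(sizeof(output_classes) / sizeof(output_classes[0])); i++)
		if (output_flags & output_classes[i])
			classes++;

	return classes > 1;	/* same rule as the open-coded chain above */
}
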
@@ -1549,23 +1686,21 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1549static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) 1686static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1550{ 1687{
1551 struct intel_output *intel_output = to_intel_output(connector); 1688 struct intel_output *intel_output = to_intel_output(connector);
1552 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1553 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1689 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1690 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1691 struct drm_display_mode *newmode;
1554 1692
1555 /* 1693 /*
1556 * Attempt to get the mode list from DDC. 1694 * Attempt to get the mode list from DDC.
1557 * Assume that the preferred modes are 1695 * Assume that the preferred modes are
1558 * arranged in priority order. 1696 * arranged in priority order.
1559 */ 1697 */
1560 /* set the bus switch and get the modes */
1561 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1562 intel_ddc_get_modes(intel_output); 1698 intel_ddc_get_modes(intel_output);
1563 if (list_empty(&connector->probed_modes) == false) 1699 if (list_empty(&connector->probed_modes) == false)
1564 return; 1700 goto end;
1565 1701
1566 /* Fetch modes from VBT */ 1702 /* Fetch modes from VBT */
1567 if (dev_priv->sdvo_lvds_vbt_mode != NULL) { 1703 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1568 struct drm_display_mode *newmode;
1569 newmode = drm_mode_duplicate(connector->dev, 1704 newmode = drm_mode_duplicate(connector->dev,
1570 dev_priv->sdvo_lvds_vbt_mode); 1705 dev_priv->sdvo_lvds_vbt_mode);
1571 if (newmode != NULL) { 1706 if (newmode != NULL) {
@@ -1575,6 +1710,16 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1575 drm_mode_probed_add(connector, newmode); 1710 drm_mode_probed_add(connector, newmode);
1576 } 1711 }
1577 } 1712 }
1713
1714end:
1715 list_for_each_entry(newmode, &connector->probed_modes, head) {
1716 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1717 sdvo_priv->sdvo_lvds_fixed_mode =
1718 drm_mode_duplicate(connector->dev, newmode);
1719 break;
1720 }
1721 }
1722
1578} 1723}
1579 1724
1580static int intel_sdvo_get_modes(struct drm_connector *connector) 1725static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1597,14 +1742,20 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1597static void intel_sdvo_destroy(struct drm_connector *connector) 1742static void intel_sdvo_destroy(struct drm_connector *connector)
1598{ 1743{
1599 struct intel_output *intel_output = to_intel_output(connector); 1744 struct intel_output *intel_output = to_intel_output(connector);
1745 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1600 1746
1601 if (intel_output->i2c_bus) 1747 if (intel_output->i2c_bus)
1602 intel_i2c_destroy(intel_output->i2c_bus); 1748 intel_i2c_destroy(intel_output->i2c_bus);
1603 if (intel_output->ddc_bus) 1749 if (intel_output->ddc_bus)
1604 intel_i2c_destroy(intel_output->ddc_bus); 1750 intel_i2c_destroy(intel_output->ddc_bus);
1605 1751
1752 if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
1753 drm_mode_destroy(connector->dev,
1754 sdvo_priv->sdvo_lvds_fixed_mode);
1755
1606 drm_sysfs_connector_remove(connector); 1756 drm_sysfs_connector_remove(connector);
1607 drm_connector_cleanup(connector); 1757 drm_connector_cleanup(connector);
1758
1608 kfree(intel_output); 1759 kfree(intel_output);
1609} 1760}
1610 1761
@@ -1709,7 +1860,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
1709 1860
1710 list_for_each_entry(connector, 1861 list_for_each_entry(connector,
1711 &dev->mode_config.connector_list, head) { 1862 &dev->mode_config.connector_list, head) {
1712 if (to_intel_output(connector)->ddc_bus == chan) { 1863 if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
1713 intel_output = to_intel_output(connector); 1864 intel_output = to_intel_output(connector);
1714 break; 1865 break;
1715 } 1866 }
@@ -1723,7 +1874,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
1723 struct intel_output *intel_output; 1874 struct intel_output *intel_output;
1724 struct intel_sdvo_priv *sdvo_priv; 1875 struct intel_sdvo_priv *sdvo_priv;
1725 struct i2c_algo_bit_data *algo_data; 1876 struct i2c_algo_bit_data *algo_data;
1726 struct i2c_algorithm *algo; 1877 const struct i2c_algorithm *algo;
1727 1878
1728 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; 1879 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
1729 intel_output = 1880 intel_output =
@@ -1733,7 +1884,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
1733 return -EINVAL; 1884 return -EINVAL;
1734 1885
1735 sdvo_priv = intel_output->dev_priv; 1886 sdvo_priv = intel_output->dev_priv;
1736 algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo; 1887 algo = intel_output->i2c_bus->algo;
1737 1888
1738 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); 1889 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1739 return algo->master_xfer(i2c_adap, msgs, num); 1890 return algo->master_xfer(i2c_adap, msgs, num);
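
The master_xfer change above routes DDC traffic through a wrapper: before delegating to the underlying bit-banging i2c algorithm it asks the SDVO device to switch its control bus to the DDC channel. Stripped to its essentials, the pattern looks like the sketch below; all names and types are illustrative stand-ins, not the i2c-core or i915 API.

/* Hedged sketch of the "switch bus, then delegate" wrapper used for SDVO DDC. */
#include <stdio.h>

struct fake_adapter;
typedef int (*xfer_fn)(struct fake_adapter *adap, const char *msg);

struct fake_adapter {
	xfer_fn real_xfer;	/* the original bit-banging transfer */
	int ddc_bus;		/* which SDVO bus carries DDC */
};

static void select_control_bus(int bus)
{
	/* stands in for intel_sdvo_set_control_bus_switch() */
	printf("switching SDVO control bus to %d\n", bus);
}

static int wrapped_xfer(struct fake_adapter *adap, const char *msg)
{
	select_control_bus(adap->ddc_bus);	/* 1. point the mux at DDC   */
	return adap->real_xfer(adap, msg);	/* 2. run the real transfer  */
}
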
@@ -1780,18 +1931,101 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
1780 return 0x72; 1931 return 0x72;
1781} 1932}
1782 1933
1934static bool
1935intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
1936{
1937 struct drm_connector *connector = &intel_output->base;
1938 struct drm_encoder *encoder = &intel_output->enc;
1939 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1940 bool ret = true, registered = false;
1941
1942 sdvo_priv->is_tv = false;
1943 intel_output->needs_tv_clock = false;
1944 sdvo_priv->is_lvds = false;
1945
1946 if (device_is_registered(&connector->kdev)) {
1947 drm_sysfs_connector_remove(connector);
1948 registered = true;
1949 }
1950
1951 if (flags &
1952 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
1953 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
1954 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
1955 else
1956 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
1957
1958 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
1959 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
1960
1961 if (intel_sdvo_get_supp_encode(intel_output,
1962 &sdvo_priv->encode) &&
1963 intel_sdvo_get_digital_encoding_mode(intel_output) &&
1964 sdvo_priv->is_hdmi) {
1965 /* enable hdmi encoding mode if supported */
1966 intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
1967 intel_sdvo_set_colorimetry(intel_output,
1968 SDVO_COLORIMETRY_RGB256);
1969 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
1970 }
1971 } else if (flags & SDVO_OUTPUT_SVID0) {
1972
1973 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
1974 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
1975 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1976 sdvo_priv->is_tv = true;
1977 intel_output->needs_tv_clock = true;
1978 } else if (flags & SDVO_OUTPUT_RGB0) {
1979
1980 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1981 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
1982 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
1983 } else if (flags & SDVO_OUTPUT_RGB1) {
1984
1985 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
1986 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
1987 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
1988 } else if (flags & SDVO_OUTPUT_LVDS0) {
1989
1990 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
1991 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
1992 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
1993 sdvo_priv->is_lvds = true;
1994 } else if (flags & SDVO_OUTPUT_LVDS1) {
1995
1996 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1997 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
1998 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
1999 sdvo_priv->is_lvds = true;
2000 } else {
2001
2002 unsigned char bytes[2];
2003
2004 sdvo_priv->controlled_output = 0;
2005 memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
2006 DRM_DEBUG_KMS(I915_SDVO,
2007 "%s: Unknown SDVO output type (0x%02x%02x)\n",
2008 SDVO_NAME(sdvo_priv),
2009 bytes[0], bytes[1]);
2010 ret = false;
2011 }
2012
2013 if (ret && registered)
2014 ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
2015
2016
2017 return ret;
2018
2019}
2020
1783bool intel_sdvo_init(struct drm_device *dev, int output_device) 2021bool intel_sdvo_init(struct drm_device *dev, int output_device)
1784{ 2022{
1785 struct drm_connector *connector; 2023 struct drm_connector *connector;
1786 struct intel_output *intel_output; 2024 struct intel_output *intel_output;
1787 struct intel_sdvo_priv *sdvo_priv; 2025 struct intel_sdvo_priv *sdvo_priv;
1788 struct intel_i2c_chan *i2cbus = NULL; 2026
1789 struct intel_i2c_chan *ddcbus = NULL;
1790 int connector_type;
1791 u8 ch[0x40]; 2027 u8 ch[0x40];
1792 int i; 2028 int i;
1793 int encoder_type, output_id;
1794 u8 slave_addr;
1795 2029
1796 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 2030 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
1797 if (!intel_output) { 2031 if (!intel_output) {
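
intel_sdvo_output_setup() above walks the advertised output flags in priority order (TMDS first, then S-Video, RGB, LVDS) and picks connector and encoder types for the first class it finds. A data-driven variant keeps that priority order in a table; the sketch below shows a subset with placeholder constants rather than the real DRM enums.

/* Hedged sketch: priority table mapping an SDVO output flag to
 * connector/encoder types. Constants stand in for the DRM values. */
#include <stdint.h>
#include <stddef.h>

enum fake_connector { CONN_DVID, CONN_SVIDEO, CONN_VGA, CONN_LVDS };
enum fake_encoder   { ENC_TMDS, ENC_TVDAC, ENC_DAC, ENC_LVDS };

struct output_map {
	uint16_t flag;			/* single SDVO_OUTPUT_* style bit */
	enum fake_connector connector;
	enum fake_encoder encoder;
};

static const struct output_map priority[] = {
	{ 1 << 0, CONN_DVID,   ENC_TMDS  },	/* TMDS0 */
	{ 1 << 4, CONN_SVIDEO, ENC_TVDAC },	/* SVID0 */
	{ 1 << 2, CONN_VGA,    ENC_DAC   },	/* RGB0  */
	{ 1 << 6, CONN_LVDS,   ENC_LVDS  },	/* LVDS0 */
};

static const struct output_map *pick_output(uint16_t flags)
{
	size_t i;

	for (i = 0; i < sizeof(priority) / sizeof(priority[0]); i++)
		if (flags & priority[i].flag)
			return &priority[i];

	return NULL;	/* unknown output type, mirrors the "ret = false" branch */
}
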
@@ -1799,29 +2033,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1799 } 2033 }
1800 2034
1801 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 2035 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
2036 sdvo_priv->output_device = output_device;
2037
2038 intel_output->dev_priv = sdvo_priv;
1802 intel_output->type = INTEL_OUTPUT_SDVO; 2039 intel_output->type = INTEL_OUTPUT_SDVO;
1803 2040
1804 /* setup the DDC bus. */ 2041 /* setup the DDC bus. */
1805 if (output_device == SDVOB) 2042 if (output_device == SDVOB)
1806 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 2043 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
1807 else 2044 else
1808 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 2045 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
1809 2046
1810 if (!i2cbus) 2047 if (!intel_output->i2c_bus)
1811 goto err_inteloutput; 2048 goto err_inteloutput;
1812 2049
1813 slave_addr = intel_sdvo_get_slave_addr(dev, output_device); 2050 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
1814 sdvo_priv->i2c_bus = i2cbus;
1815 2051
1816 if (output_device == SDVOB) { 2052 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
1817 output_id = 1; 2053 intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
1818 } else {
1819 output_id = 2;
1820 }
1821 sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
1822 sdvo_priv->output_device = output_device;
1823 intel_output->i2c_bus = i2cbus;
1824 intel_output->dev_priv = sdvo_priv;
1825 2054
1826 /* Read the regs to test if we can talk to the device */ 2055 /* Read the regs to test if we can talk to the device */
1827 for (i = 0; i < 0x40; i++) { 2056 for (i = 0; i < 0x40; i++) {
@@ -1835,101 +2064,39 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1835 2064
1836 /* setup the DDC bus. */ 2065 /* setup the DDC bus. */
1837 if (output_device == SDVOB) 2066 if (output_device == SDVOB)
1838 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2067 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
1839 else 2068 else
1840 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2069 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
1841 2070
1842 if (ddcbus == NULL) 2071 if (intel_output->ddc_bus == NULL)
1843 goto err_i2c; 2072 goto err_i2c;
1844 2073
1845 intel_sdvo_i2c_bit_algo.functionality = 2074 /* Wrap with our custom algo which switches to DDC mode */
1846 intel_output->i2c_bus->adapter.algo->functionality; 2075 intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
1847 ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
1848 intel_output->ddc_bus = ddcbus;
1849 2076
1850         /* In the default case sdvo lvds is false */       2077         /* In the default case sdvo lvds is false */
1851 sdvo_priv->is_lvds = false;
1852 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 2078 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
1853 2079
1854 if (sdvo_priv->caps.output_flags & 2080 if (intel_sdvo_output_setup(intel_output,
1855 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { 2081 sdvo_priv->caps.output_flags) != true) {
1856 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) 2082 DRM_DEBUG("SDVO output failed to setup on SDVO%c\n",
1857 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; 2083 output_device == SDVOB ? 'B' : 'C');
1858 else
1859 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
1860
1861 encoder_type = DRM_MODE_ENCODER_TMDS;
1862 connector_type = DRM_MODE_CONNECTOR_DVID;
1863
1864 if (intel_sdvo_get_supp_encode(intel_output,
1865 &sdvo_priv->encode) &&
1866 intel_sdvo_get_digital_encoding_mode(intel_output) &&
1867 sdvo_priv->is_hdmi) {
1868 /* enable hdmi encoding mode if supported */
1869 intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
1870 intel_sdvo_set_colorimetry(intel_output,
1871 SDVO_COLORIMETRY_RGB256);
1872 connector_type = DRM_MODE_CONNECTOR_HDMIA;
1873 }
1874 }
1875 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
1876 {
1877 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
1878 encoder_type = DRM_MODE_ENCODER_TVDAC;
1879 connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1880 sdvo_priv->is_tv = true;
1881 intel_output->needs_tv_clock = true;
1882 }
1883 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
1884 {
1885 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1886 encoder_type = DRM_MODE_ENCODER_DAC;
1887 connector_type = DRM_MODE_CONNECTOR_VGA;
1888 }
1889 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
1890 {
1891 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
1892 encoder_type = DRM_MODE_ENCODER_DAC;
1893 connector_type = DRM_MODE_CONNECTOR_VGA;
1894 }
1895 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
1896 {
1897 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
1898 encoder_type = DRM_MODE_ENCODER_LVDS;
1899 connector_type = DRM_MODE_CONNECTOR_LVDS;
1900 sdvo_priv->is_lvds = true;
1901 }
1902 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
1903 {
1904 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1905 encoder_type = DRM_MODE_ENCODER_LVDS;
1906 connector_type = DRM_MODE_CONNECTOR_LVDS;
1907 sdvo_priv->is_lvds = true;
1908 }
1909 else
1910 {
1911 unsigned char bytes[2];
1912
1913 sdvo_priv->controlled_output = 0;
1914 memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
1915 DRM_DEBUG_KMS(I915_SDVO,
1916 "%s: Unknown SDVO output type (0x%02x%02x)\n",
1917 SDVO_NAME(sdvo_priv),
1918 bytes[0], bytes[1]);
1919 encoder_type = DRM_MODE_ENCODER_NONE;
1920 connector_type = DRM_MODE_CONNECTOR_Unknown;
1921 goto err_i2c; 2084 goto err_i2c;
1922 } 2085 }
1923 2086
2087
1924 connector = &intel_output->base; 2088 connector = &intel_output->base;
1925 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, 2089 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1926 connector_type); 2090 connector->connector_type);
2091
1927 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); 2092 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1928 connector->interlace_allowed = 0; 2093 connector->interlace_allowed = 0;
1929 connector->doublescan_allowed = 0; 2094 connector->doublescan_allowed = 0;
1930 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 2095 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1931 2096
1932 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); 2097 drm_encoder_init(dev, &intel_output->enc,
2098 &intel_sdvo_enc_funcs, intel_output->enc.encoder_type);
2099
1933 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 2100 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
1934 2101
1935 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2102 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
@@ -1965,9 +2132,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1965 return true; 2132 return true;
1966 2133
1967err_i2c: 2134err_i2c:
1968 if (ddcbus != NULL) 2135 if (intel_output->ddc_bus != NULL)
1969 intel_i2c_destroy(intel_output->ddc_bus); 2136 intel_i2c_destroy(intel_output->ddc_bus);
1970 intel_i2c_destroy(intel_output->i2c_bus); 2137 if (intel_output->i2c_bus != NULL)
2138 intel_i2c_destroy(intel_output->i2c_bus);
1971err_inteloutput: 2139err_inteloutput:
1972 kfree(intel_output); 2140 kfree(intel_output);
1973 2141
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 193938b7d7f9..ba5cdf8ae40b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -715,6 +715,7 @@ struct intel_sdvo_enhancements_arg {
715 #define SDVO_HBUF_TX_ONCE (2 << 6) 715 #define SDVO_HBUF_TX_ONCE (2 << 6)
716 #define SDVO_HBUF_TX_VSYNC (3 << 6) 716 #define SDVO_HBUF_TX_VSYNC (3 << 6)
717#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c 717#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
718#define SDVO_NEED_TO_STALL (1 << 7)
718 719
719struct intel_sdvo_encode{ 720struct intel_sdvo_encode{
720 u8 dvi_rev; 721 u8 dvi_rev;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea68992e4416..da4ab4dc1630 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1383 /* 1383 /*
1384          * Detect TV by polling                            1384          * Detect TV by polling
1385 */ 1385 */
1386 if (intel_output->load_detect_temp) { 1386 save_tv_dac = tv_dac;
1387 /* TV not currently running, prod it with destructive detect */ 1387 tv_ctl = I915_READ(TV_CTL);
1388 save_tv_dac = tv_dac; 1388 save_tv_ctl = tv_ctl;
1389 tv_ctl = I915_READ(TV_CTL); 1389 tv_ctl &= ~TV_ENC_ENABLE;
1390 save_tv_ctl = tv_ctl; 1390 tv_ctl &= ~TV_TEST_MODE_MASK;
1391 tv_ctl &= ~TV_ENC_ENABLE; 1391 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1392 tv_ctl &= ~TV_TEST_MODE_MASK; 1392 tv_dac &= ~TVDAC_SENSE_MASK;
1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; 1393 tv_dac &= ~DAC_A_MASK;
1394 tv_dac &= ~TVDAC_SENSE_MASK; 1394 tv_dac &= ~DAC_B_MASK;
1395 tv_dac &= ~DAC_A_MASK; 1395 tv_dac &= ~DAC_C_MASK;
1396 tv_dac &= ~DAC_B_MASK; 1396 tv_dac |= (TVDAC_STATE_CHG_EN |
1397 tv_dac &= ~DAC_C_MASK; 1397 TVDAC_A_SENSE_CTL |
1398 tv_dac |= (TVDAC_STATE_CHG_EN | 1398 TVDAC_B_SENSE_CTL |
1399 TVDAC_A_SENSE_CTL | 1399 TVDAC_C_SENSE_CTL |
1400 TVDAC_B_SENSE_CTL | 1400 DAC_CTL_OVERRIDE |
1401 TVDAC_C_SENSE_CTL | 1401 DAC_A_0_7_V |
1402 DAC_CTL_OVERRIDE | 1402 DAC_B_0_7_V |
1403 DAC_A_0_7_V | 1403 DAC_C_0_7_V);
1404 DAC_B_0_7_V | 1404 I915_WRITE(TV_CTL, tv_ctl);
1405 DAC_C_0_7_V); 1405 I915_WRITE(TV_DAC, tv_dac);
1406 I915_WRITE(TV_CTL, tv_ctl); 1406 intel_wait_for_vblank(dev);
1407 I915_WRITE(TV_DAC, tv_dac); 1407 tv_dac = I915_READ(TV_DAC);
1408 intel_wait_for_vblank(dev); 1408 I915_WRITE(TV_DAC, save_tv_dac);
1409 tv_dac = I915_READ(TV_DAC); 1409 I915_WRITE(TV_CTL, save_tv_ctl);
1410 I915_WRITE(TV_DAC, save_tv_dac); 1410 intel_wait_for_vblank(dev);
1411 I915_WRITE(TV_CTL, save_tv_ctl);
1412 intel_wait_for_vblank(dev);
1413 }
1414 /* 1411 /*
1415 * A B C 1412 * A B C
1416 * 0 1 1 Composite 1413 * 0 1 1 Composite
@@ -1493,6 +1490,27 @@ static struct input_res {
1493 {"1920x1080", 1920, 1080}, 1490 {"1920x1080", 1920, 1080},
1494}; 1491};
1495 1492
1493/*
       1494 * Choose the preferred mode according to the line count of the TV format
1495 */
1496static void
1497intel_tv_chose_preferred_modes(struct drm_connector *connector,
1498 struct drm_display_mode *mode_ptr)
1499{
1500 struct intel_output *intel_output = to_intel_output(connector);
1501 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1502
1503 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
1504 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
1505 else if (tv_mode->nbr_end > 480) {
1506 if (tv_mode->progressive == true && tv_mode->nbr_end < 720) {
1507 if (mode_ptr->vdisplay == 720)
1508 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
1509 } else if (mode_ptr->vdisplay == 1080)
1510 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
1511 }
1512}
1513
1496/** 1514/**
1497 * Stub get_modes function. 1515 * Stub get_modes function.
1498 * 1516 *
@@ -1547,6 +1565,7 @@ intel_tv_get_modes(struct drm_connector *connector)
1547 mode_ptr->clock = (int) tmp; 1565 mode_ptr->clock = (int) tmp;
1548 1566
1549 mode_ptr->type = DRM_MODE_TYPE_DRIVER; 1567 mode_ptr->type = DRM_MODE_TYPE_DRIVER;
1568 intel_tv_chose_preferred_modes(connector, mode_ptr);
1550 drm_mode_probed_add(connector, mode_ptr); 1569 drm_mode_probed_add(connector, mode_ptr);
1551 count++; 1570 count++;
1552 } 1571 }
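
intel_tv_chose_preferred_modes(), added above and called from intel_tv_get_modes(), marks one mode per TV standard as preferred based on the standard's last active line (nbr_end): standards ending below line 480 prefer a 480-line mode, small progressive standards prefer 720, and taller standards prefer 1080. A standalone restatement of that rule, using an invented TV descriptor whose fields mirror the ones read by the function:

/* Hedged sketch of the preferred-mode rule; fake_tv_mode is a stand-in type. */
#include <stdbool.h>

struct fake_tv_mode {
	int nbr_end;		/* last active line of the TV format */
	bool progressive;
};

static int preferred_vdisplay(const struct fake_tv_mode *tv)
{
	if (tv->nbr_end < 480)
		return 480;			/* e.g. NTSC-style formats */
	if (tv->nbr_end > 480) {
		if (tv->progressive && tv->nbr_end < 720)
			return 720;		/* small progressive formats */
		return 1080;			/* everything larger */
	}
	return 0;	/* nbr_end == 480: the original marks nothing preferred */
}
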
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 5fae1e074b4b..013d38059943 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
13 radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ 13 radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
14 radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ 14 radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
15 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 15 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
16 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o 16 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \
17 radeon_test.o
17 18
18radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 19radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
19 20
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c0080cc9bf8d..74d034f77c6b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -31,6 +31,132 @@
31#include "atom.h" 31#include "atom.h"
32#include "atom-bits.h" 32#include "atom-bits.h"
33 33
34static void atombios_overscan_setup(struct drm_crtc *crtc,
35 struct drm_display_mode *mode,
36 struct drm_display_mode *adjusted_mode)
37{
38 struct drm_device *dev = crtc->dev;
39 struct radeon_device *rdev = dev->dev_private;
40 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
41 SET_CRTC_OVERSCAN_PS_ALLOCATION args;
42 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
43 int a1, a2;
44
45 memset(&args, 0, sizeof(args));
46
47 args.usOverscanRight = 0;
48 args.usOverscanLeft = 0;
49 args.usOverscanBottom = 0;
50 args.usOverscanTop = 0;
51 args.ucCRTC = radeon_crtc->crtc_id;
52
53 switch (radeon_crtc->rmx_type) {
54 case RMX_CENTER:
55 args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
56 args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
57 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
58 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
59 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
60 break;
61 case RMX_ASPECT:
62 a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
63 a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
64
65 if (a1 > a2) {
66 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
67 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
68 } else if (a2 > a1) {
69 args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
70 args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
71 }
72 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
73 break;
74 case RMX_FULL:
75 default:
76 args.usOverscanRight = 0;
77 args.usOverscanLeft = 0;
78 args.usOverscanBottom = 0;
79 args.usOverscanTop = 0;
80 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
81 break;
82 }
83}
84
85static void atombios_scaler_setup(struct drm_crtc *crtc)
86{
87 struct drm_device *dev = crtc->dev;
88 struct radeon_device *rdev = dev->dev_private;
89 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
90 ENABLE_SCALER_PS_ALLOCATION args;
91 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
92 /* fixme - fill in enc_priv for atom dac */
93 enum radeon_tv_std tv_std = TV_STD_NTSC;
94
95 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
96 return;
97
98 memset(&args, 0, sizeof(args));
99
100 args.ucScaler = radeon_crtc->crtc_id;
101
102 if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) {
103 switch (tv_std) {
104 case TV_STD_NTSC:
105 default:
106 args.ucTVStandard = ATOM_TV_NTSC;
107 break;
108 case TV_STD_PAL:
109 args.ucTVStandard = ATOM_TV_PAL;
110 break;
111 case TV_STD_PAL_M:
112 args.ucTVStandard = ATOM_TV_PALM;
113 break;
114 case TV_STD_PAL_60:
115 args.ucTVStandard = ATOM_TV_PAL60;
116 break;
117 case TV_STD_NTSC_J:
118 args.ucTVStandard = ATOM_TV_NTSCJ;
119 break;
120 case TV_STD_SCART_PAL:
121 args.ucTVStandard = ATOM_TV_PAL; /* ??? */
122 break;
123 case TV_STD_SECAM:
124 args.ucTVStandard = ATOM_TV_SECAM;
125 break;
126 case TV_STD_PAL_CN:
127 args.ucTVStandard = ATOM_TV_PALCN;
128 break;
129 }
130 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
131 } else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) {
132 args.ucTVStandard = ATOM_TV_CV;
133 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
134 } else {
135 switch (radeon_crtc->rmx_type) {
136 case RMX_FULL:
137 args.ucEnable = ATOM_SCALER_EXPANSION;
138 break;
139 case RMX_CENTER:
140 args.ucEnable = ATOM_SCALER_CENTER;
141 break;
142 case RMX_ASPECT:
143 args.ucEnable = ATOM_SCALER_EXPANSION;
144 break;
145 default:
146 if (ASIC_IS_AVIVO(rdev))
147 args.ucEnable = ATOM_SCALER_DISABLE;
148 else
149 args.ucEnable = ATOM_SCALER_CENTER;
150 break;
151 }
152 }
153 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
154 if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
155 && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
156 atom_rv515_force_tv_scaler(rdev);
157 }
158}
159
34static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) 160static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
35{ 161{
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 162 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
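
In atombios_overscan_setup() above, the RMX_ASPECT case compares a1 = src_v * dst_h against a2 = dst_v * src_h to decide whether the requested mode is relatively taller or wider than the panel timing, then pads the narrower axis (the patch stores the a2 > a1 result in the left/right fields; the sketch below shows the symmetric letterbox computation the arithmetic implies). A worked example with invented numbers, a 1280x1024 mode centred on a 1920x1080 panel:

/* Hedged worked example of the RMX_ASPECT overscan arithmetic.
 * The 1280x1024 source and 1920x1080 panel are example values only. */
#include <stdio.h>

int main(void)
{
	int src_h = 1280, src_v = 1024;		/* requested mode           */
	int dst_h = 1920, dst_v = 1080;		/* panel (adjusted) mode    */

	long a1 = (long)src_v * dst_h;		/* 1024 * 1920 = 1966080    */
	long a2 = (long)dst_v * src_h;		/* 1080 * 1280 = 1382400    */

	if (a1 > a2) {
		/* source is relatively taller: pad left/right (pillarbox) */
		int pad = (dst_h - (int)(a2 / src_v)) / 2;  /* (1920-1350)/2 = 285 */
		printf("pillarbox: %d px on each side\n", pad);
	} else if (a2 > a1) {
		/* source is relatively wider: pad top/bottom (letterbox)  */
		int pad = (dst_v - (int)(a1 / src_h)) / 2;
		printf("letterbox: %d px top and bottom\n", pad);
	} else {
		printf("aspect ratios match, no overscan needed\n");
	}
	return 0;
}
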
@@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
203 if (ASIC_IS_AVIVO(rdev)) { 329 if (ASIC_IS_AVIVO(rdev)) {
204 uint32_t ss_cntl; 330 uint32_t ss_cntl;
205 331
332 if ((rdev->family == CHIP_RS600) ||
333 (rdev->family == CHIP_RS690) ||
334 (rdev->family == CHIP_RS740))
335 pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
336 RADEON_PLL_PREFER_CLOSEST_LOWER);
337
206 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 338 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
207 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 339 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
208 else 340 else
@@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
321 struct drm_gem_object *obj; 453 struct drm_gem_object *obj;
322 struct drm_radeon_gem_object *obj_priv; 454 struct drm_radeon_gem_object *obj_priv;
323 uint64_t fb_location; 455 uint64_t fb_location;
324 uint32_t fb_format, fb_pitch_pixels; 456 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
325 457
326 if (!crtc->fb) 458 if (!crtc->fb)
327 return -EINVAL; 459 return -EINVAL;
@@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
358 return -EINVAL; 490 return -EINVAL;
359 } 491 }
360 492
361 /* TODO tiling */ 493 radeon_object_get_tiling_flags(obj->driver_private,
494 &tiling_flags, NULL);
495 if (tiling_flags & RADEON_TILING_MACRO)
496 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
497
498 if (tiling_flags & RADEON_TILING_MICRO)
499 fb_format |= AVIVO_D1GRPH_TILED;
500
362 if (radeon_crtc->crtc_id == 0) 501 if (radeon_crtc->crtc_id == 0)
363 WREG32(AVIVO_D1VGA_CONTROL, 0); 502 WREG32(AVIVO_D1VGA_CONTROL, 0);
364 else 503 else
@@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
509 radeon_crtc_set_base(crtc, x, y, old_fb); 648 radeon_crtc_set_base(crtc, x, y, old_fb);
510 radeon_legacy_atom_set_surface(crtc); 649 radeon_legacy_atom_set_surface(crtc);
511 } 650 }
651 atombios_overscan_setup(crtc, mode, adjusted_mode);
652 atombios_scaler_setup(crtc);
653 radeon_bandwidth_update(rdev);
512 return 0; 654 return 0;
513} 655}
514 656
@@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
516 struct drm_display_mode *mode, 658 struct drm_display_mode *mode,
517 struct drm_display_mode *adjusted_mode) 659 struct drm_display_mode *adjusted_mode)
518{ 660{
661 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
662 return false;
519 return true; 663 return true;
520} 664}
521 665
@@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
548 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; 692 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
549 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); 693 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
550} 694}
551
552void radeon_init_disp_bw_avivo(struct drm_device *dev,
553 struct drm_display_mode *mode1,
554 uint32_t pixel_bytes1,
555 struct drm_display_mode *mode2,
556 uint32_t pixel_bytes2)
557{
558 struct radeon_device *rdev = dev->dev_private;
559 fixed20_12 min_mem_eff;
560 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
561 fixed20_12 sclk_ff, mclk_ff;
562 uint32_t dc_lb_memory_split, temp;
563
564 min_mem_eff.full = rfixed_const_8(0);
565 if (rdev->disp_priority == 2) {
566 uint32_t mc_init_misc_lat_timer = 0;
567 if (rdev->family == CHIP_RV515)
568 mc_init_misc_lat_timer =
569 RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
570 else if (rdev->family == CHIP_RS690)
571 mc_init_misc_lat_timer =
572 RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
573
574 mc_init_misc_lat_timer &=
575 ~(R300_MC_DISP1R_INIT_LAT_MASK <<
576 R300_MC_DISP1R_INIT_LAT_SHIFT);
577 mc_init_misc_lat_timer &=
578 ~(R300_MC_DISP0R_INIT_LAT_MASK <<
579 R300_MC_DISP0R_INIT_LAT_SHIFT);
580
581 if (mode2)
582 mc_init_misc_lat_timer |=
583 (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
584 if (mode1)
585 mc_init_misc_lat_timer |=
586 (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
587
588 if (rdev->family == CHIP_RV515)
589 WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
590 mc_init_misc_lat_timer);
591 else if (rdev->family == CHIP_RS690)
592 WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
593 mc_init_misc_lat_timer);
594 }
595
596 /*
  597         * determine if there is enough bw for current mode
598 */
599 temp_ff.full = rfixed_const(100);
600 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
601 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
602 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
603 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
604
605 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
606 temp_ff.full = rfixed_const(temp);
607 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
608 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
609
610 pix_clk.full = 0;
611 pix_clk2.full = 0;
612 peak_disp_bw.full = 0;
613 if (mode1) {
614 temp_ff.full = rfixed_const(1000);
615 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
616 pix_clk.full = rfixed_div(pix_clk, temp_ff);
617 temp_ff.full = rfixed_const(pixel_bytes1);
618 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
619 }
620 if (mode2) {
621 temp_ff.full = rfixed_const(1000);
622 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
623 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
624 temp_ff.full = rfixed_const(pixel_bytes2);
625 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
626 }
627
628 if (peak_disp_bw.full >= mem_bw.full) {
629 DRM_ERROR
630 ("You may not have enough display bandwidth for current mode\n"
631 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
632 printk("peak disp bw %d, mem_bw %d\n",
633 rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
634 }
635
636 /*
637 * Line Buffer Setup
638 * There is a single line buffer shared by both display controllers.
639 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
  640         * controllers. The partitioning can either be done manually or via one of four
641 * preset allocations specified in bits 1:0:
642 * 0 - line buffer is divided in half and shared between each display controller
643 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
644 * 2 - D1 gets the whole buffer
645 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
  646         * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode.
647 * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
648 * 14:4; D2 allocation follows D1.
649 */
650
651 /* is auto or manual better ? */
652 dc_lb_memory_split =
653 RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
654 dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
655#if 1
656 /* auto */
657 if (mode1 && mode2) {
658 if (mode1->hdisplay > mode2->hdisplay) {
659 if (mode1->hdisplay > 2560)
660 dc_lb_memory_split |=
661 AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
662 else
663 dc_lb_memory_split |=
664 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
665 } else if (mode2->hdisplay > mode1->hdisplay) {
666 if (mode2->hdisplay > 2560)
667 dc_lb_memory_split |=
668 AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
669 else
670 dc_lb_memory_split |=
671 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
672 } else
673 dc_lb_memory_split |=
674 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
675 } else if (mode1) {
676 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
677 } else if (mode2) {
678 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
679 }
680#else
681 /* manual */
682 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
683 dc_lb_memory_split &=
684 ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
685 AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
686 if (mode1) {
687 dc_lb_memory_split |=
688 ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
689 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
690 } else if (mode2) {
691 dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
692 }
693#endif
694 WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
695}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c550932a108f..f1ba8ff41130 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
110 if (i < 0 || i > rdev->gart.num_gpu_pages) { 110 if (i < 0 || i > rdev->gart.num_gpu_pages) {
111 return -EINVAL; 111 return -EINVAL;
112 } 112 }
113 rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); 113 rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
114 return 0; 114 return 0;
115} 115}
116 116
@@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev)
173 DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); 173 DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
174 } 174 }
175 /* Write VRAM size in case we are limiting it */ 175 /* Write VRAM size in case we are limiting it */
176 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); 176 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
177 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 177 /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
178 * if the aperture is 64MB but we have 32MB VRAM
179 * we report only 32MB VRAM but we have to set MC_FB_LOCATION
        180          * to 64MB, otherwise the gpu accidentally dies */
181 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
178 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); 182 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
179 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); 183 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
180 WREG32(RADEON_MC_FB_LOCATION, tmp); 184 WREG32(RADEON_MC_FB_LOCATION, tmp);
@@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev)
215 r100_pci_gart_disable(rdev); 219 r100_pci_gart_disable(rdev);
216 220
217 /* Setup GPU memory space */ 221 /* Setup GPU memory space */
218 rdev->mc.vram_location = 0xFFFFFFFFUL;
219 rdev->mc.gtt_location = 0xFFFFFFFFUL; 222 rdev->mc.gtt_location = 0xFFFFFFFFUL;
220 if (rdev->flags & RADEON_IS_AGP) { 223 if (rdev->flags & RADEON_IS_AGP) {
221 r = radeon_agp_init(rdev); 224 r = radeon_agp_init(rdev);
@@ -719,13 +722,14 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
719 unsigned idx) 722 unsigned idx)
720{ 723{
721 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 724 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
722 uint32_t header = ib_chunk->kdata[idx]; 725 uint32_t header;
723 726
724 if (idx >= ib_chunk->length_dw) { 727 if (idx >= ib_chunk->length_dw) {
725 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 728 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
726 idx, ib_chunk->length_dw); 729 idx, ib_chunk->length_dw);
727 return -EINVAL; 730 return -EINVAL;
728 } 731 }
732 header = ib_chunk->kdata[idx];
729 pkt->idx = idx; 733 pkt->idx = idx;
730 pkt->type = CP_PACKET_GET_TYPE(header); 734 pkt->type = CP_PACKET_GET_TYPE(header);
731 pkt->count = CP_PACKET_GET_COUNT(header); 735 pkt->count = CP_PACKET_GET_COUNT(header);
@@ -753,6 +757,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
753} 757}
754 758
755/** 759/**
760 * r100_cs_packet_next_vline() - parse userspace VLINE packet
761 * @parser: parser structure holding parsing context.
762 *
763 * Userspace sends a special sequence for VLINE waits.
764 * PACKET0 - VLINE_START_END + value
        765 * PACKET0 - WAIT_UNTIL + value
766 * RELOC (P3) - crtc_id in reloc.
767 *
768 * This function parses this and relocates the VLINE START END
769 * and WAIT UNTIL packets to the correct crtc.
770 * It also detects a switched off crtc and nulls out the
771 * wait in that case.
772 */
773int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
774{
775 struct radeon_cs_chunk *ib_chunk;
776 struct drm_mode_object *obj;
777 struct drm_crtc *crtc;
778 struct radeon_crtc *radeon_crtc;
779 struct radeon_cs_packet p3reloc, waitreloc;
780 int crtc_id;
781 int r;
782 uint32_t header, h_idx, reg;
783
784 ib_chunk = &p->chunks[p->chunk_ib_idx];
785
786 /* parse the wait until */
787 r = r100_cs_packet_parse(p, &waitreloc, p->idx);
788 if (r)
789 return r;
790
        791         /* check it's a wait until with only 1 count */
792 if (waitreloc.reg != RADEON_WAIT_UNTIL ||
793 waitreloc.count != 0) {
794 DRM_ERROR("vline wait had illegal wait until segment\n");
795 r = -EINVAL;
796 return r;
797 }
798
799 if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
800 DRM_ERROR("vline wait had illegal wait until\n");
801 r = -EINVAL;
802 return r;
803 }
804
805 /* jump over the NOP */
806 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
807 if (r)
808 return r;
809
810 h_idx = p->idx - 2;
811 p->idx += waitreloc.count;
812 p->idx += p3reloc.count;
813
814 header = ib_chunk->kdata[h_idx];
815 crtc_id = ib_chunk->kdata[h_idx + 5];
816 reg = ib_chunk->kdata[h_idx] >> 2;
817 mutex_lock(&p->rdev->ddev->mode_config.mutex);
818 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
819 if (!obj) {
820 DRM_ERROR("cannot find crtc %d\n", crtc_id);
821 r = -EINVAL;
822 goto out;
823 }
824 crtc = obj_to_crtc(obj);
825 radeon_crtc = to_radeon_crtc(crtc);
826 crtc_id = radeon_crtc->crtc_id;
827
828 if (!crtc->enabled) {
829 /* if the CRTC isn't enabled - we need to nop out the wait until */
830 ib_chunk->kdata[h_idx + 2] = PACKET2(0);
831 ib_chunk->kdata[h_idx + 3] = PACKET2(0);
832 } else if (crtc_id == 1) {
833 switch (reg) {
834 case AVIVO_D1MODE_VLINE_START_END:
835 header &= R300_CP_PACKET0_REG_MASK;
836 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
837 break;
838 case RADEON_CRTC_GUI_TRIG_VLINE:
839 header &= R300_CP_PACKET0_REG_MASK;
840 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
841 break;
842 default:
843 DRM_ERROR("unknown crtc reloc\n");
844 r = -EINVAL;
845 goto out;
846 }
847 ib_chunk->kdata[h_idx] = header;
848 ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
849 }
850out:
851 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
852 return r;
853}
854
855/**
756 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 856 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
757 * @parser: parser structure holding parsing context. 857 * @parser: parser structure holding parsing context.
758 * @data: pointer to relocation data 858 * @data: pointer to relocation data
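
The comment on r100_cs_packet_parse_vline() above describes the userspace sequence the kernel rewrites: a PACKET0 write to VLINE_START_END, a PACKET0 write to WAIT_UNTIL, then a relocation naming the CRTC. A hedged sketch of how a userspace driver might lay those dwords out in its command stream; the register offsets, the PACKET0 encoding, and the single-dword "relocation" are placeholders, not the real Radeon definitions.

/* Hedged sketch: emitting the three-part VLINE wait sequence the parser
 * above expects. Offsets and encodings are illustrative only. */
#include <stdint.h>

#define FAKE_PACKET0(reg, n)   ((0u << 30) | (((n) & 0x3fffu) << 16) | ((reg) >> 2))
#define FAKE_VLINE_START_END   0x1b00	/* placeholder register offset */
#define FAKE_WAIT_UNTIL        0x1720	/* placeholder register offset */
#define FAKE_WAIT_CRTC_VLINE   (1u << 3)	/* placeholder wait condition */

static unsigned emit_vline_wait(uint32_t *cs, unsigned idx,
				uint32_t start_end, uint32_t crtc_id)
{
	cs[idx++] = FAKE_PACKET0(FAKE_VLINE_START_END, 0);
	cs[idx++] = start_end;			/* vline window for the wait */
	cs[idx++] = FAKE_PACKET0(FAKE_WAIT_UNTIL, 0);
	cs[idx++] = FAKE_WAIT_CRTC_VLINE;	/* wait condition            */
	/* a relocation packet carrying crtc_id follows for the kernel;
	 * here it is reduced to a single placeholder dword */
	cs[idx++] = crtc_id;
	return idx;
}
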
@@ -814,6 +914,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
814 unsigned idx; 914 unsigned idx;
815 bool onereg; 915 bool onereg;
816 int r; 916 int r;
917 u32 tile_flags = 0;
817 918
818 ib = p->ib->ptr; 919 ib = p->ib->ptr;
819 ib_chunk = &p->chunks[p->chunk_ib_idx]; 920 ib_chunk = &p->chunks[p->chunk_ib_idx];
@@ -825,6 +926,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
825 } 926 }
826 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { 927 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
827 switch (reg) { 928 switch (reg) {
929 case RADEON_CRTC_GUI_TRIG_VLINE:
930 r = r100_cs_packet_parse_vline(p);
931 if (r) {
932 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
933 idx, reg);
934 r100_cs_dump_packet(p, pkt);
935 return r;
936 }
937 break;
828 /* FIXME: only allow PACKET3 blit? easier to check for out of 938 /* FIXME: only allow PACKET3 blit? easier to check for out of
829 * range access */ 939 * range access */
830 case RADEON_DST_PITCH_OFFSET: 940 case RADEON_DST_PITCH_OFFSET:
@@ -838,7 +948,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
838 } 948 }
839 tmp = ib_chunk->kdata[idx] & 0x003fffff; 949 tmp = ib_chunk->kdata[idx] & 0x003fffff;
840 tmp += (((u32)reloc->lobj.gpu_offset) >> 10); 950 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
841 ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; 951
952 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
953 tile_flags |= RADEON_DST_TILE_MACRO;
954 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
955 if (reg == RADEON_SRC_PITCH_OFFSET) {
956 DRM_ERROR("Cannot src blit from microtiled surface\n");
957 r100_cs_dump_packet(p, pkt);
958 return -EINVAL;
959 }
960 tile_flags |= RADEON_DST_TILE_MICRO;
961 }
962
963 tmp |= tile_flags;
964 ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
842 break; 965 break;
843 case RADEON_RB3D_DEPTHOFFSET: 966 case RADEON_RB3D_DEPTHOFFSET:
844 case RADEON_RB3D_COLOROFFSET: 967 case RADEON_RB3D_COLOROFFSET:
@@ -869,6 +992,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
869 case R300_TX_OFFSET_0+52: 992 case R300_TX_OFFSET_0+52:
870 case R300_TX_OFFSET_0+56: 993 case R300_TX_OFFSET_0+56:
871 case R300_TX_OFFSET_0+60: 994 case R300_TX_OFFSET_0+60:
995 /* rn50 has no 3D engine so fail on any 3d setup */
996 if (ASIC_IS_RN50(p->rdev)) {
997 DRM_ERROR("attempt to use RN50 3D engine failed\n");
998 return -EINVAL;
999 }
872 r = r100_cs_packet_next_reloc(p, &reloc); 1000 r = r100_cs_packet_next_reloc(p, &reloc);
873 if (r) { 1001 if (r) {
874 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1002 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -878,6 +1006,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
878 } 1006 }
879 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1007 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
880 break; 1008 break;
1009 case R300_RB3D_COLORPITCH0:
1010 case RADEON_RB3D_COLORPITCH:
1011 r = r100_cs_packet_next_reloc(p, &reloc);
1012 if (r) {
1013 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1014 idx, reg);
1015 r100_cs_dump_packet(p, pkt);
1016 return r;
1017 }
1018
1019 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1020 tile_flags |= RADEON_COLOR_TILE_ENABLE;
1021 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1022 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1023
1024 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
1025 tmp |= tile_flags;
1026 ib[idx] = tmp;
1027 break;
881 default: 1028 default:
882         /* FIXME: we don't want to allow any other packets */  1029         /* FIXME: we don't want to allow any other packets */
883 break; 1030 break;
@@ -1256,29 +1403,100 @@ static void r100_vram_get_type(struct radeon_device *rdev)
1256 } 1403 }
1257} 1404}
1258 1405
1259void r100_vram_info(struct radeon_device *rdev) 1406static u32 r100_get_accessible_vram(struct radeon_device *rdev)
1260{ 1407{
1261 r100_vram_get_type(rdev); 1408 u32 aper_size;
1409 u8 byte;
1410
1411 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1412
1413 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
1414 * that is has the 2nd generation multifunction PCI interface
1415 */
1416 if (rdev->family == CHIP_RV280 ||
1417 rdev->family >= CHIP_RV350) {
1418 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
1419 ~RADEON_HDP_APER_CNTL);
1420 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
1421 return aper_size * 2;
1422 }
1423
1424 /* Older cards have all sorts of funny issues to deal with. First
1425 * check if it's a multifunction card by reading the PCI config
1426 * header type... Limit those to one aperture size
1427 */
1428 pci_read_config_byte(rdev->pdev, 0xe, &byte);
1429 if (byte & 0x80) {
1430 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
1431 DRM_INFO("Limiting VRAM to one aperture\n");
1432 return aper_size;
1433 }
1434
1435 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
       1436  * has set it up. We don't write this as it's broken on some ASICs but
1437 * we expect the BIOS to have done the right thing (might be too optimistic...)
1438 */
1439 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
1440 return aper_size * 2;
1441 return aper_size;
1442}
1443
1444void r100_vram_init_sizes(struct radeon_device *rdev)
1445{
1446 u64 config_aper_size;
1447 u32 accessible;
1448
1449 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1262 1450
1263 if (rdev->flags & RADEON_IS_IGP) { 1451 if (rdev->flags & RADEON_IS_IGP) {
1264 uint32_t tom; 1452 uint32_t tom;
1265 /* read NB_TOM to get the amount of ram stolen for the GPU */ 1453 /* read NB_TOM to get the amount of ram stolen for the GPU */
1266 tom = RREG32(RADEON_NB_TOM); 1454 tom = RREG32(RADEON_NB_TOM);
1267 rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 1455 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1268 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); 1456 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1457 rdev->mc.vram_location = (tom & 0xffff) << 16;
1458 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1459 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1269 } else { 1460 } else {
1270 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 1461 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
1271 /* Some production boards of m6 will report 0 1462 /* Some production boards of m6 will report 0
1272 * if it's 8 MB 1463 * if it's 8 MB
1273 */ 1464 */
1274 if (rdev->mc.vram_size == 0) { 1465 if (rdev->mc.real_vram_size == 0) {
1275 rdev->mc.vram_size = 8192 * 1024; 1466 rdev->mc.real_vram_size = 8192 * 1024;
1276 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); 1467 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1277 } 1468 }
1469 /* let driver place VRAM */
1470 rdev->mc.vram_location = 0xFFFFFFFFUL;
1471 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
       1472          * Novell bug 204882, along with lots of Ubuntu ones */
1473 if (config_aper_size > rdev->mc.real_vram_size)
1474 rdev->mc.mc_vram_size = config_aper_size;
1475 else
1476 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1278 } 1477 }
1279 1478
1479 /* work out accessible VRAM */
1480 accessible = r100_get_accessible_vram(rdev);
1481
1280 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 1482 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1281 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 1483 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1484
1485 if (accessible > rdev->mc.aper_size)
1486 accessible = rdev->mc.aper_size;
1487
1488 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
1489 rdev->mc.mc_vram_size = rdev->mc.aper_size;
1490
1491 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
1492 rdev->mc.real_vram_size = rdev->mc.aper_size;
1493}
1494
1495void r100_vram_info(struct radeon_device *rdev)
1496{
1497 r100_vram_get_type(rdev);
1498
1499 r100_vram_init_sizes(rdev);
1282} 1500}
1283 1501
1284 1502
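
r100_vram_init_sizes() above ends by clamping the three sizes against each other: accessible VRAM (as derived from the PCI aperture) is limited to the aperture, and both mc_vram_size and real_vram_size are capped at the aperture as well, which is exactly the 32 MB chip / 64 MB aperture case the Novell bug 204882 comment describes. A compact restatement with those example numbers (the sizes below are the illustration, not values read from hardware):

/* Hedged sketch of the size clamping at the end of r100_vram_init_sizes(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t real_vram  = 32ull << 20;	/* what CONFIG_MEMSIZE reports    */
	uint64_t mc_vram    = 64ull << 20;	/* MC window must cover aperture  */
	uint64_t aper_size  = 64ull << 20;	/* PCI BAR size                   */
	uint64_t accessible = 2 * aper_size;	/* gen-2 PCI: both apertures      */

	if (accessible > aper_size)
		accessible = aper_size;
	if (mc_vram > aper_size)
		mc_vram = aper_size;
	if (real_vram > aper_size)
		real_vram = aper_size;

	printf("real %llu MB, mc %llu MB, accessible %llu MB\n",
	       (unsigned long long)(real_vram >> 20),
	       (unsigned long long)(mc_vram >> 20),
	       (unsigned long long)(accessible >> 20));
	return 0;
}
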
@@ -1533,3 +1751,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev)
1533 return 0; 1751 return 0;
1534#endif 1752#endif
1535} 1753}
1754
1755int r100_set_surface_reg(struct radeon_device *rdev, int reg,
1756 uint32_t tiling_flags, uint32_t pitch,
1757 uint32_t offset, uint32_t obj_size)
1758{
1759 int surf_index = reg * 16;
1760 int flags = 0;
1761
1762 /* r100/r200 divide by 16 */
1763 if (rdev->family < CHIP_R300)
1764 flags = pitch / 16;
1765 else
1766 flags = pitch / 8;
1767
1768 if (rdev->family <= CHIP_RS200) {
1769 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
1770 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
1771 flags |= RADEON_SURF_TILE_COLOR_BOTH;
1772 if (tiling_flags & RADEON_TILING_MACRO)
1773 flags |= RADEON_SURF_TILE_COLOR_MACRO;
1774 } else if (rdev->family <= CHIP_RV280) {
1775 if (tiling_flags & (RADEON_TILING_MACRO))
1776 flags |= R200_SURF_TILE_COLOR_MACRO;
1777 if (tiling_flags & RADEON_TILING_MICRO)
1778 flags |= R200_SURF_TILE_COLOR_MICRO;
1779 } else {
1780 if (tiling_flags & RADEON_TILING_MACRO)
1781 flags |= R300_SURF_TILE_MACRO;
1782 if (tiling_flags & RADEON_TILING_MICRO)
1783 flags |= R300_SURF_TILE_MICRO;
1784 }
1785
1786 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
1787 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
1788 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
1789 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
1790 return 0;
1791}
1792
1793void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
1794{
1795 int surf_index = reg * 16;
1796 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
1797}
1798
1799void r100_bandwidth_update(struct radeon_device *rdev)
1800{
1801 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
1802 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
1803 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
1804 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
1805 fixed20_12 memtcas_ff[8] = {
1806 fixed_init(1),
1807 fixed_init(2),
1808 fixed_init(3),
1809 fixed_init(0),
1810 fixed_init_half(1),
1811 fixed_init_half(2),
1812 fixed_init(0),
1813 };
1814 fixed20_12 memtcas_rs480_ff[8] = {
1815 fixed_init(0),
1816 fixed_init(1),
1817 fixed_init(2),
1818 fixed_init(3),
1819 fixed_init(0),
1820 fixed_init_half(1),
1821 fixed_init_half(2),
1822 fixed_init_half(3),
1823 };
1824 fixed20_12 memtcas2_ff[8] = {
1825 fixed_init(0),
1826 fixed_init(1),
1827 fixed_init(2),
1828 fixed_init(3),
1829 fixed_init(4),
1830 fixed_init(5),
1831 fixed_init(6),
1832 fixed_init(7),
1833 };
1834 fixed20_12 memtrbs[8] = {
1835 fixed_init(1),
1836 fixed_init_half(1),
1837 fixed_init(2),
1838 fixed_init_half(2),
1839 fixed_init(3),
1840 fixed_init_half(3),
1841 fixed_init(4),
1842 fixed_init_half(4)
1843 };
1844 fixed20_12 memtrbs_r4xx[8] = {
1845 fixed_init(4),
1846 fixed_init(5),
1847 fixed_init(6),
1848 fixed_init(7),
1849 fixed_init(8),
1850 fixed_init(9),
1851 fixed_init(10),
1852 fixed_init(11)
1853 };
1854 fixed20_12 min_mem_eff;
1855 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
1856 fixed20_12 cur_latency_mclk, cur_latency_sclk;
1857 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
1858 disp_drain_rate2, read_return_rate;
1859 fixed20_12 time_disp1_drop_priority;
1860 int c;
1861 int cur_size = 16; /* in octawords */
1862 int critical_point = 0, critical_point2;
1863/* uint32_t read_return_rate, time_disp1_drop_priority; */
1864 int stop_req, max_stop_req;
1865 struct drm_display_mode *mode1 = NULL;
1866 struct drm_display_mode *mode2 = NULL;
1867 uint32_t pixel_bytes1 = 0;
1868 uint32_t pixel_bytes2 = 0;
1869
1870 if (rdev->mode_info.crtcs[0]->base.enabled) {
1871 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
1872 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
1873 }
1874 if (rdev->mode_info.crtcs[1]->base.enabled) {
1875 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
1876 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
1877 }
1878
1879 min_mem_eff.full = rfixed_const_8(0);
1880 /* get modes */
1881 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
1882 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
1883 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
1884 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
1885 /* check crtc enables */
1886 if (mode2)
1887 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
1888 if (mode1)
1889 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
1890 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
1891 }
1892
1893 /*
1894 * determine if there is enough bandwidth for the current mode
1895 */
1896 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
1897 temp_ff.full = rfixed_const(100);
1898 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
1899 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
1900 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
1901
1902 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
1903 temp_ff.full = rfixed_const(temp);
1904 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
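/*
 * Illustrative sketch (not part of the driver): fixed20_12 is a fixed-point
 * value with an assumed 12 fractional bits, and the rfixed_* helpers used
 * above behave roughly like the ones below.  default_mclk/default_sclk appear
 * to be stored in 10 kHz units here, so dividing by 100 yields MHz, and
 * mem_bw = mclk_MHz * bytes_per_clock approximates MB/s.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fp20_12;

static fp20_12 fp_const(uint32_t a)         { return (fp20_12){ a << 12 }; }
static fp20_12 fp_div(fp20_12 a, fp20_12 b) { return (fp20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) }; }
static fp20_12 fp_mul(fp20_12 a, fp20_12 b) { return (fp20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) }; }
static uint32_t fp_trunc(fp20_12 a)         { return a.full >> 12; }

int main(void)
{
    uint32_t default_mclk = 20000;   /* 200 MHz in 10 kHz units (example value) */
    uint32_t vram_width = 128, vram_is_ddr = 1;

    fp20_12 mclk = fp_div(fp_const(default_mclk), fp_const(100));        /* -> 200 MHz */
    fp20_12 bytes_per_clk = fp_const((vram_width / 8) * (vram_is_ddr ? 2 : 1));
    fp20_12 mem_bw = fp_mul(mclk, bytes_per_clk);                        /* -> 6400 MB/s */

    printf("mclk=%u MHz, mem_bw=%u MB/s\n", fp_trunc(mclk), fp_trunc(mem_bw));
    return 0;
}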
1905
1906 pix_clk.full = 0;
1907 pix_clk2.full = 0;
1908 peak_disp_bw.full = 0;
1909 if (mode1) {
1910 temp_ff.full = rfixed_const(1000);
1911 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
1912 pix_clk.full = rfixed_div(pix_clk, temp_ff);
1913 temp_ff.full = rfixed_const(pixel_bytes1);
1914 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
1915 }
1916 if (mode2) {
1917 temp_ff.full = rfixed_const(1000);
1918 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
1919 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
1920 temp_ff.full = rfixed_const(pixel_bytes2);
1921 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
1922 }
1923
1924 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
1925 if (peak_disp_bw.full >= mem_bw.full) {
1926 DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
1927 "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n");
1928 }
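/*
 * Illustrative worked example (not part of the driver), using the assumed
 * numbers from the sketch above: a single 1600x1200@60 head with a 32-bit
 * framebuffer needs roughly 162 MHz * 4 B = 648 MB/s, while memory can
 * deliver about 6400 MB/s * 0.8 (the min_mem_eff set above, roughly 0.8)
 * = 5120 MB/s, so the warning above would not trigger.
 */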
1929
1930 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
1931 temp = RREG32(RADEON_MEM_TIMING_CNTL);
1932 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
1933 mem_trcd = ((temp >> 2) & 0x3) + 1;
1934 mem_trp = ((temp & 0x3)) + 1;
1935 mem_tras = ((temp & 0x70) >> 4) + 1;
1936 } else if (rdev->family == CHIP_R300 ||
1937 rdev->family == CHIP_R350) { /* r300, r350 */
1938 mem_trcd = (temp & 0x7) + 1;
1939 mem_trp = ((temp >> 8) & 0x7) + 1;
1940 mem_tras = ((temp >> 11) & 0xf) + 4;
1941 } else if (rdev->family == CHIP_RV350 ||
1942 rdev->family <= CHIP_RV380) {
1943 /* rv3x0 */
1944 mem_trcd = (temp & 0x7) + 3;
1945 mem_trp = ((temp >> 8) & 0x7) + 3;
1946 mem_tras = ((temp >> 11) & 0xf) + 6;
1947 } else if (rdev->family == CHIP_R420 ||
1948 rdev->family == CHIP_R423 ||
1949 rdev->family == CHIP_RV410) {
1950 /* r4xx */
1951 mem_trcd = (temp & 0xf) + 3;
1952 if (mem_trcd > 15)
1953 mem_trcd = 15;
1954 mem_trp = ((temp >> 8) & 0xf) + 3;
1955 if (mem_trp > 15)
1956 mem_trp = 15;
1957 mem_tras = ((temp >> 12) & 0x1f) + 6;
1958 if (mem_tras > 31)
1959 mem_tras = 31;
1960 } else { /* RV200, R200 */
1961 mem_trcd = (temp & 0x7) + 1;
1962 mem_trp = ((temp >> 8) & 0x7) + 1;
1963 mem_tras = ((temp >> 12) & 0xf) + 4;
1964 }
1965 /* convert to FF */
1966 trcd_ff.full = rfixed_const(mem_trcd);
1967 trp_ff.full = rfixed_const(mem_trp);
1968 tras_ff.full = rfixed_const(mem_tras);
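/*
 * Illustrative sketch (not part of the driver): the per-family decode above
 * is plain bit-field extraction from the memory timing register.  The helper
 * below shows the r300/r350 layout as an example; field positions are taken
 * from the code above and should be treated as illustrative, not authoritative.
 */
#include <stdint.h>
#include <stdio.h>

struct mem_timings { uint32_t trcd, trp, tras; };

static struct mem_timings decode_r300_timings(uint32_t reg)
{
    struct mem_timings t;
    t.trcd = (reg & 0x7) + 1;          /* bits 2:0  */
    t.trp  = ((reg >> 8) & 0x7) + 1;   /* bits 10:8 */
    t.tras = ((reg >> 11) & 0xf) + 4;  /* bits 14:11 */
    return t;
}

int main(void)
{
    struct mem_timings t = decode_r300_timings(0x00003a05); /* arbitrary example value */
    printf("tRCD=%u tRP=%u tRAS=%u memory clocks\n", t.trcd, t.trp, t.tras);
    return 0;
}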
1969
1970 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
1971 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
1972 data = (temp & (7 << 20)) >> 20;
1973 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
1974 if (rdev->family == CHIP_RS480) /* don't think rs400 */
1975 tcas_ff = memtcas_rs480_ff[data];
1976 else
1977 tcas_ff = memtcas_ff[data];
1978 } else
1979 tcas_ff = memtcas2_ff[data];
1980
1981 if (rdev->family == CHIP_RS400 ||
1982 rdev->family == CHIP_RS480) {
1983 /* extra cas latency stored in bits 23-25 0-4 clocks */
1984 data = (temp >> 23) & 0x7;
1985 if (data < 5)
1986 tcas_ff.full += rfixed_const(data);
1987 }
1988
1989 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
1990 /* on the R300, Tcas is included in Trbs.
1991 */
1992 temp = RREG32(RADEON_MEM_CNTL);
1993 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
1994 if (data == 1) {
1995 if (R300_MEM_USE_CD_CH_ONLY & temp) {
1996 temp = RREG32(R300_MC_IND_INDEX);
1997 temp &= ~R300_MC_IND_ADDR_MASK;
1998 temp |= R300_MC_READ_CNTL_CD_mcind;
1999 WREG32(R300_MC_IND_INDEX, temp);
2000 temp = RREG32(R300_MC_IND_DATA);
2001 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2002 } else {
2003 temp = RREG32(R300_MC_READ_CNTL_AB);
2004 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2005 }
2006 } else {
2007 temp = RREG32(R300_MC_READ_CNTL_AB);
2008 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2009 }
2010 if (rdev->family == CHIP_RV410 ||
2011 rdev->family == CHIP_R420 ||
2012 rdev->family == CHIP_R423)
2013 trbs_ff = memtrbs_r4xx[data];
2014 else
2015 trbs_ff = memtrbs[data];
2016 tcas_ff.full += trbs_ff.full;
2017 }
2018
2019 sclk_eff_ff.full = sclk_ff.full;
2020
2021 if (rdev->flags & RADEON_IS_AGP) {
2022 fixed20_12 agpmode_ff;
2023 agpmode_ff.full = rfixed_const(radeon_agpmode);
2024 temp_ff.full = rfixed_const_666(16);
2025 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
2026 }
2027 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2028
2029 if (ASIC_IS_R300(rdev)) {
2030 sclk_delay_ff.full = rfixed_const(250);
2031 } else {
2032 if ((rdev->family == CHIP_RV100) ||
2033 rdev->flags & RADEON_IS_IGP) {
2034 if (rdev->mc.vram_is_ddr)
2035 sclk_delay_ff.full = rfixed_const(41);
2036 else
2037 sclk_delay_ff.full = rfixed_const(33);
2038 } else {
2039 if (rdev->mc.vram_width == 128)
2040 sclk_delay_ff.full = rfixed_const(57);
2041 else
2042 sclk_delay_ff.full = rfixed_const(41);
2043 }
2044 }
2045
2046 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
2047
2048 if (rdev->mc.vram_is_ddr) {
2049 if (rdev->mc.vram_width == 32) {
2050 k1.full = rfixed_const(40);
2051 c = 3;
2052 } else {
2053 k1.full = rfixed_const(20);
2054 c = 1;
2055 }
2056 } else {
2057 k1.full = rfixed_const(40);
2058 c = 3;
2059 }
2060
2061 temp_ff.full = rfixed_const(2);
2062 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
2063 temp_ff.full = rfixed_const(c);
2064 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
2065 temp_ff.full = rfixed_const(4);
2066 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
2067 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
2068 mc_latency_mclk.full += k1.full;
2069
2070 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
2071 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
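/*
 * Illustrative restatement (not part of the driver): the fixed-point sequence
 * above boils down to
 *     mc_latency_mclk = (2*tRCD + c*tCAS + 4*tRAS + 4*tRP + k1) / mclk + 4 / sclk_eff
 * with timings in memory clocks, clocks in MHz, and the result in microseconds.
 * The double-precision version below just restates that formula with assumed
 * example numbers; the real code keeps everything in 20.12 fixed point.
 */
#include <stdio.h>

int main(void)
{
    double trcd = 3, trp = 3, tras = 9, tcas = 3;   /* example memory timings, in mclks */
    double k1 = 20, c = 1;                          /* DDR, non-32-bit bus case above */
    double mclk = 200.0, sclk_eff = 300.0;          /* example clocks in MHz */

    double mc_latency_mclk = (2*trcd + c*tcas + 4*tras + 4*trp + k1) / mclk
                             + 4.0 / sclk_eff;
    printf("mc_latency_mclk ~= %.4f us\n", mc_latency_mclk);
    return 0;
}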
2072
2073 /*
2074 HW cursor time assuming worst case of a full-size colour cursor.
2075 */
2076 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2077 temp_ff.full += trcd_ff.full;
2078 if (temp_ff.full < tras_ff.full)
2079 temp_ff.full = tras_ff.full;
2080 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
2081
2082 temp_ff.full = rfixed_const(cur_size);
2083 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
2084 /*
2085 Find the total latency for the display data.
2086 */
2087 disp_latency_overhead.full = rfixed_const(80);
2088 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
2089 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2090 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2091
2092 if (mc_latency_mclk.full > mc_latency_sclk.full)
2093 disp_latency.full = mc_latency_mclk.full;
2094 else
2095 disp_latency.full = mc_latency_sclk.full;
2096
2097 /* setup Max GRPH_STOP_REQ default value */
2098 if (ASIC_IS_RV100(rdev))
2099 max_stop_req = 0x5c;
2100 else
2101 max_stop_req = 0x7c;
2102
2103 if (mode1) {
2104 /* CRTC1
2105 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2106 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2107 */
2108 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2109
2110 if (stop_req > max_stop_req)
2111 stop_req = max_stop_req;
2112
2113 /*
2114 Find the drain rate of the display buffer.
2115 */
2116 temp_ff.full = rfixed_const((16/pixel_bytes1));
2117 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
2118
2119 /*
2120 Find the critical point of the display buffer.
2121 */
2122 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
2123 crit_point_ff.full += rfixed_const_half(0);
2124
2125 critical_point = rfixed_trunc(crit_point_ff);
2126
2127 if (rdev->disp_priority == 2) {
2128 critical_point = 0;
2129 }
2130
2131 /*
2132 The critical point should never be above max_stop_req-4. Setting
2133 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2134 */
2135 if (max_stop_req - critical_point < 4)
2136 critical_point = 0;
2137
2138 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2139 /* some R300 cards have problems with this set to 0 when CRTC2 is enabled. */
2140 critical_point = 0x10;
2141 }
2142
2143 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2144 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2145 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2146 temp &= ~(RADEON_GRPH_START_REQ_MASK);
2147 if ((rdev->family == CHIP_R350) &&
2148 (stop_req > 0x15)) {
2149 stop_req -= 0x10;
2150 }
2151 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2152 temp |= RADEON_GRPH_BUFFER_SIZE;
2153 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
2154 RADEON_GRPH_CRITICAL_AT_SOF |
2155 RADEON_GRPH_STOP_CNTL);
2156 /*
2157 Write the result into the register.
2158 */
2159 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2160 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2161
2162#if 0
2163 if ((rdev->family == CHIP_RS400) ||
2164 (rdev->family == CHIP_RS480)) {
2165 /* attempt to program RS400 disp regs correctly ??? */
2166 temp = RREG32(RS400_DISP1_REG_CNTL);
2167 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
2168 RS400_DISP1_STOP_REQ_LEVEL_MASK);
2169 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
2170 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2171 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2172 temp = RREG32(RS400_DMIF_MEM_CNTL1);
2173 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
2174 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
2175 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
2176 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
2177 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
2178 }
2179#endif
2180
2181 DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
2182 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
2183 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
2184 }
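/*
 * Illustrative worked example (not part of the driver), using assumed numbers:
 * for a 1280x1024 mode at 4 bytes/pixel with a 108 MHz pixel clock and a total
 * display latency of about 1.2 us,
 *     stop_req        = 1280 * 4 / 16 = 320  -> clamped to max_stop_req (0x7c)
 *     disp_drain_rate = 108 / (16/4)  = 27   (16-byte words per microsecond)
 *     critical_point  = trunc(27 * 1.2 + 0.5) = 32
 * i.e. roughly the FIFO fill level, in 128-bit words, below which display
 * requests are escalated to high priority.
 */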
2185
2186 if (mode2) {
2187 u32 grph2_cntl;
2188 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
2189
2190 if (stop_req > max_stop_req)
2191 stop_req = max_stop_req;
2192
2193 /*
2194 Find the drain rate of the display buffer.
2195 */
2196 temp_ff.full = rfixed_const((16/pixel_bytes2));
2197 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
2198
2199 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2200 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
2201 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2202 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
2203 if ((rdev->family == CHIP_R350) &&
2204 (stop_req > 0x15)) {
2205 stop_req -= 0x10;
2206 }
2207 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2208 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
2209 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
2210 RADEON_GRPH_CRITICAL_AT_SOF |
2211 RADEON_GRPH_STOP_CNTL);
2212
2213 if ((rdev->family == CHIP_RS100) ||
2214 (rdev->family == CHIP_RS200))
2215 critical_point2 = 0;
2216 else {
2217 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
2218 temp_ff.full = rfixed_const(temp);
2219 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
2220 if (sclk_ff.full < temp_ff.full)
2221 temp_ff.full = sclk_ff.full;
2222
2223 read_return_rate.full = temp_ff.full;
2224
2225 if (mode1) {
2226 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2227 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
2228 } else {
2229 time_disp1_drop_priority.full = 0;
2230 }
2231 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2232 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
2233 crit_point_ff.full += rfixed_const_half(0);
2234
2235 critical_point2 = rfixed_trunc(crit_point_ff);
2236
2237 if (rdev->disp_priority == 2) {
2238 critical_point2 = 0;
2239 }
2240
2241 if (max_stop_req - critical_point2 < 4)
2242 critical_point2 = 0;
2243
2244 }
2245
2246 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
2247 /* some R300 cards have problems with this set to 0 */
2248 critical_point2 = 0x10;
2249 }
2250
2251 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2252 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2253
2254 if ((rdev->family == CHIP_RS400) ||
2255 (rdev->family == CHIP_RS480)) {
2256#if 0
2257 /* attempt to program RS400 disp2 regs correctly ??? */
2258 temp = RREG32(RS400_DISP2_REQ_CNTL1);
2259 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
2260 RS400_DISP2_STOP_REQ_LEVEL_MASK);
2261 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
2262 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2263 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2264 temp = RREG32(RS400_DISP2_REQ_CNTL2);
2265 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
2266 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
2267 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
2268 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
2269 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
2270#endif
2271 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
2272 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
2273 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
2274 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
2275 }
2276
2277 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
2278 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
2279 }
2280}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index e2ed5bc08170..9c8d41534a5d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -30,6 +30,8 @@
30#include "drm.h" 30#include "drm.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon.h" 32#include "radeon.h"
33#include "radeon_drm.h"
34#include "radeon_share.h"
33 35
34/* r300,r350,rv350,rv370,rv380 depends on : */ 36/* r300,r350,rv350,rv370,rv380 depends on : */
35void r100_hdp_reset(struct radeon_device *rdev); 37void r100_hdp_reset(struct radeon_device *rdev);
@@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev);
44int r100_cs_packet_parse(struct radeon_cs_parser *p, 46int r100_cs_packet_parse(struct radeon_cs_parser *p,
45 struct radeon_cs_packet *pkt, 47 struct radeon_cs_packet *pkt,
46 unsigned idx); 48 unsigned idx);
49int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
47int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, 50int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
48 struct radeon_cs_reloc **cs_reloc); 51 struct radeon_cs_reloc **cs_reloc);
49int r100_cs_parse_packet0(struct radeon_cs_parser *p, 52int r100_cs_parse_packet0(struct radeon_cs_parser *p,
@@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
150 if (i < 0 || i > rdev->gart.num_gpu_pages) { 153 if (i < 0 || i > rdev->gart.num_gpu_pages) {
151 return -EINVAL; 154 return -EINVAL;
152 } 155 }
153 addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; 156 addr = (lower_32_bits(addr) >> 8) |
154 writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); 157 ((upper_32_bits(addr) & 0xff) << 24) |
158 0xc;
 159 /* on x86 we want this to be CPU endian; on powerpc
 160 * without HW swappers it'll get swapped on the way
 161 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
162 writel(addr, ((void __iomem *)ptr) + (i * 4));
155 return 0; 163 return 0;
156} 164}
157 165
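The rewritten rv370_pcie_gart_set_page() packs a 40-bit system address into a 32-bit PCIE GART entry: because pages are 4 KiB aligned, the low word shifted right by 8 and the upper byte shifted into bits 31:24 place address bits 39:12 in entry bits 31:4, leaving the low nibble free for the 0xC flag bits. A minimal standalone sketch of that packing, assuming 4 KiB pages and treating 0xC purely as illustrative flag bits:

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_FLAGS 0xcu   /* stand-in for the valid/snoop bits used above */

    static uint32_t rv370_pack_gart_entry(uint64_t addr)
    {
        uint32_t lo = (uint32_t)addr;
        uint32_t hi = (uint32_t)(addr >> 32);

        return (lo >> 8) | ((hi & 0xff) << 24) | PTE_FLAGS;
    }

    int main(void)
    {
        uint64_t page = 0x0000001234567000ULL;   /* arbitrary 4 KiB-aligned address */
        printf("GART entry: 0x%08x\n", rv370_pack_gart_entry(page));   /* 0x1234567c */
        return 0;
    }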
@@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev)
579 } else { 587 } else {
580 rdev->mc.vram_width = 64; 588 rdev->mc.vram_width = 64;
581 } 589 }
582 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
583 590
584 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 591 r100_vram_init_sizes(rdev);
585 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
586} 592}
587 593
588 594
@@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track)
970 976
971static const unsigned r300_reg_safe_bm[159] = { 977static const unsigned r300_reg_safe_bm[159] = {
972 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 978 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
973 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, 979 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
974 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 980 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
975 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 981 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
976 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 982 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1019 struct radeon_cs_reloc *reloc; 1025 struct radeon_cs_reloc *reloc;
1020 struct r300_cs_track *track; 1026 struct r300_cs_track *track;
1021 volatile uint32_t *ib; 1027 volatile uint32_t *ib;
1022 uint32_t tmp; 1028 uint32_t tmp, tile_flags = 0;
1023 unsigned i; 1029 unsigned i;
1024 int r; 1030 int r;
1025 1031
@@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1027 ib_chunk = &p->chunks[p->chunk_ib_idx]; 1033 ib_chunk = &p->chunks[p->chunk_ib_idx];
1028 track = (struct r300_cs_track*)p->track; 1034 track = (struct r300_cs_track*)p->track;
1029 switch(reg) { 1035 switch(reg) {
1036 case AVIVO_D1MODE_VLINE_START_END:
1037 case RADEON_CRTC_GUI_TRIG_VLINE:
1038 r = r100_cs_packet_parse_vline(p);
1039 if (r) {
1040 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1041 idx, reg);
1042 r100_cs_dump_packet(p, pkt);
1043 return r;
1044 }
1045 break;
1030 case RADEON_DST_PITCH_OFFSET: 1046 case RADEON_DST_PITCH_OFFSET:
1031 case RADEON_SRC_PITCH_OFFSET: 1047 case RADEON_SRC_PITCH_OFFSET:
1032 r = r100_cs_packet_next_reloc(p, &reloc); 1048 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1038 } 1054 }
1039 tmp = ib_chunk->kdata[idx] & 0x003fffff; 1055 tmp = ib_chunk->kdata[idx] & 0x003fffff;
1040 tmp += (((u32)reloc->lobj.gpu_offset) >> 10); 1056 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
1041 ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; 1057
1058 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1059 tile_flags |= RADEON_DST_TILE_MACRO;
1060 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
1061 if (reg == RADEON_SRC_PITCH_OFFSET) {
1062 DRM_ERROR("Cannot src blit from microtiled surface\n");
1063 r100_cs_dump_packet(p, pkt);
1064 return -EINVAL;
1065 }
1066 tile_flags |= RADEON_DST_TILE_MICRO;
1067 }
1068 tmp |= tile_flags;
1069 ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
1042 break; 1070 break;
1043 case R300_RB3D_COLOROFFSET0: 1071 case R300_RB3D_COLOROFFSET0:
1044 case R300_RB3D_COLOROFFSET1: 1072 case R300_RB3D_COLOROFFSET1:
@@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1127 /* RB3D_COLORPITCH1 */ 1155 /* RB3D_COLORPITCH1 */
1128 /* RB3D_COLORPITCH2 */ 1156 /* RB3D_COLORPITCH2 */
1129 /* RB3D_COLORPITCH3 */ 1157 /* RB3D_COLORPITCH3 */
1158 r = r100_cs_packet_next_reloc(p, &reloc);
1159 if (r) {
1160 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1161 idx, reg);
1162 r100_cs_dump_packet(p, pkt);
1163 return r;
1164 }
1165
1166 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1167 tile_flags |= R300_COLOR_TILE_ENABLE;
1168 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1169 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
1170
1171 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
1172 tmp |= tile_flags;
1173 ib[idx] = tmp;
1174
1130 i = (reg - 0x4E38) >> 2; 1175 i = (reg - 0x4E38) >> 2;
1131 track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; 1176 track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
1132 switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { 1177 switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
@@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1182 break; 1227 break;
1183 case 0x4F24: 1228 case 0x4F24:
1184 /* ZB_DEPTHPITCH */ 1229 /* ZB_DEPTHPITCH */
1230 r = r100_cs_packet_next_reloc(p, &reloc);
1231 if (r) {
1232 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1233 idx, reg);
1234 r100_cs_dump_packet(p, pkt);
1235 return r;
1236 }
1237
1238 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1239 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
1240 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 1241 tile_flags |= R300_DEPTHMICROTILE_TILED;
1242
1243 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
1244 tmp |= tile_flags;
1245 ib[idx] = tmp;
1246
1185 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; 1247 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
1186 break; 1248 break;
1187 case 0x4104: 1249 case 0x4104:
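The new COLORPITCH/DEPTHPITCH handling follows a common command-stream patching pattern: clear the tile-control field (bits 18:16 here) in the value supplied by userspace, then OR in flags derived from the kernel-side relocation, so userspace cannot enable tiling the kernel did not set up. A minimal sketch of that pattern, with the flag values as placeholders rather than the real R300 definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define TILE_FIELD_MASK   (0x7u << 16)   /* bits 18:16, as masked above */
    #define MACRO_TILE_FLAG   (0x1u << 16)   /* placeholder for R300_COLOR_TILE_ENABLE */
    #define MICRO_TILE_FLAG   (0x2u << 16)   /* placeholder for R300_COLOR_MICROTILE_ENABLE */

    static uint32_t patch_pitch_reg(uint32_t user_val, int macro_tiled, int micro_tiled)
    {
        uint32_t val = user_val & ~TILE_FIELD_MASK;   /* drop whatever userspace asked for */

        if (macro_tiled)
            val |= MACRO_TILE_FLAG;
        if (micro_tiled)
            val |= MICRO_TILE_FLAG;
        return val;
    }

    int main(void)
    {
        printf("patched: 0x%08x\n", patch_pitch_reg(0x00071000, 1, 0));
        return 0;
    }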
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 70f48609515e..4b7afef35a65 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -27,7 +27,9 @@
27#ifndef _R300_REG_H_ 27#ifndef _R300_REG_H_
28#define _R300_REG_H_ 28#define _R300_REG_H_
29 29
30 30#define R300_SURF_TILE_MACRO (1<<16)
31#define R300_SURF_TILE_MICRO (2<<16)
32#define R300_SURF_TILE_BOTH (3<<16)
31 33
32 34
33#define R300_MC_INIT_MISC_LAT_TIMER 0x180 35#define R300_MC_INIT_MISC_LAT_TIMER 0x180
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 9070a1c2ce23..036691b38cb7 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -445,6 +445,7 @@
445#define AVIVO_D1MODE_DATA_FORMAT 0x6528 445#define AVIVO_D1MODE_DATA_FORMAT 0x6528
446# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) 446# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
447#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C 447#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
448#define AVIVO_D1MODE_VLINE_START_END 0x6538
448#define AVIVO_D1MODE_VIEWPORT_START 0x6580 449#define AVIVO_D1MODE_VIEWPORT_START 0x6580
449#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 450#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
450#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 451#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
@@ -496,6 +497,7 @@
496#define AVIVO_D2CUR_SIZE 0x6c10 497#define AVIVO_D2CUR_SIZE 0x6c10
497#define AVIVO_D2CUR_POSITION 0x6c14 498#define AVIVO_D2CUR_POSITION 0x6c14
498 499
500#define AVIVO_D2MODE_VLINE_START_END 0x6d38
499#define AVIVO_D2MODE_VIEWPORT_START 0x6d80 501#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
500#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 502#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
501#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 503#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 570a244bd88b..09fb0b6ec7dd 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -28,6 +28,7 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_share.h"
31 32
32/* r520,rv530,rv560,rv570,r580 depends on : */ 33/* r520,rv530,rv560,rv570,r580 depends on : */
33void r100_hdp_reset(struct radeon_device *rdev); 34void r100_hdp_reset(struct radeon_device *rdev);
@@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev)
94 "programming pipes. Bad things might happen.\n"); 95 "programming pipes. Bad things might happen.\n");
95 } 96 }
96 /* Write VRAM size in case we are limiting it */ 97 /* Write VRAM size in case we are limiting it */
97 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); 98 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
98 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 99 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
99 tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); 100 tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
100 tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); 101 tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
101 WREG32_MC(R520_MC_FB_LOCATION, tmp); 102 WREG32_MC(R520_MC_FB_LOCATION, tmp);
@@ -226,9 +227,20 @@ static void r520_vram_get_type(struct radeon_device *rdev)
226 227
227void r520_vram_info(struct radeon_device *rdev) 228void r520_vram_info(struct radeon_device *rdev)
228{ 229{
230 fixed20_12 a;
231
229 r520_vram_get_type(rdev); 232 r520_vram_get_type(rdev);
230 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
231 233
232 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 234 r100_vram_init_sizes(rdev);
233 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 235 /* FIXME: we should enforce default clock in case GPU is not in
236 * default setup
237 */
238 a.full = rfixed_const(100);
239 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
240 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
241}
242
243void r520_bandwidth_update(struct radeon_device *rdev)
244{
245 rv515_bandwidth_avivo_update(rdev);
234} 246}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c45559fc97fd..538cd907df69 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev)
67 "programming pipes. Bad things might happen.\n"); 67 "programming pipes. Bad things might happen.\n");
68 } 68 }
69 69
70 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 70 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
71 tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); 71 tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
72 tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); 72 tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R600_MC_VM_FB_LOCATION, tmp); 73 WREG32(R600_MC_VM_FB_LOCATION, tmp);
@@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev)
140void r600_vram_info(struct radeon_device *rdev) 140void r600_vram_info(struct radeon_device *rdev)
141{ 141{
142 r600_vram_get_type(rdev); 142 r600_vram_get_type(rdev);
143 rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE); 143 rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE);
144 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
144 145
145 /* Could aper size report 0 ? */ 146 /* Could aper size report 0 ? */
146 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 147 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 146f3570af8e..20f17908b036 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -384,8 +384,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
384 DRM_INFO("Loading RV670 PFP Microcode\n"); 384 DRM_INFO("Loading RV670 PFP Microcode\n");
385 for (i = 0; i < PFP_UCODE_SIZE; i++) 385 for (i = 0; i < PFP_UCODE_SIZE; i++)
386 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); 386 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]);
387 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { 387 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
388 DRM_INFO("Loading RS780 CP Microcode\n"); 388 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
389 DRM_INFO("Loading RS780/RS880 CP Microcode\n");
389 for (i = 0; i < PM4_UCODE_SIZE; i++) { 390 for (i = 0; i < PM4_UCODE_SIZE; i++) {
390 RADEON_WRITE(R600_CP_ME_RAM_DATA, 391 RADEON_WRITE(R600_CP_ME_RAM_DATA,
391 RS780_cp_microcode[i][0]); 392 RS780_cp_microcode[i][0]);
@@ -396,7 +397,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
396 } 397 }
397 398
398 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 399 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
399 DRM_INFO("Loading RS780 PFP Microcode\n"); 400 DRM_INFO("Loading RS780/RS880 PFP Microcode\n");
400 for (i = 0; i < PFP_UCODE_SIZE; i++) 401 for (i = 0; i < PFP_UCODE_SIZE; i++)
401 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); 402 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]);
402 } 403 }
@@ -783,6 +784,7 @@ static void r600_gfx_init(struct drm_device *dev,
783 break; 784 break;
784 case CHIP_RV610: 785 case CHIP_RV610:
785 case CHIP_RS780: 786 case CHIP_RS780:
787 case CHIP_RS880:
786 case CHIP_RV620: 788 case CHIP_RV620:
787 dev_priv->r600_max_pipes = 1; 789 dev_priv->r600_max_pipes = 1;
788 dev_priv->r600_max_tile_pipes = 1; 790 dev_priv->r600_max_tile_pipes = 1;
@@ -917,7 +919,8 @@ static void r600_gfx_init(struct drm_device *dev,
917 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || 919 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
918 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 920 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
919 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 921 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
920 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) 922 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
923 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
921 RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); 924 RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE);
922 else 925 else
923 RADEON_WRITE(R600_DB_DEBUG, 0); 926 RADEON_WRITE(R600_DB_DEBUG, 0);
@@ -935,7 +938,8 @@ static void r600_gfx_init(struct drm_device *dev,
935 sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); 938 sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES);
936 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 939 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
937 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 940 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
938 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { 941 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
942 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
939 sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | 943 sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) |
940 R600_FETCH_FIFO_HIWATER(0xa) | 944 R600_FETCH_FIFO_HIWATER(0xa) |
941 R600_DONE_FIFO_HIWATER(0xe0) | 945 R600_DONE_FIFO_HIWATER(0xe0) |
@@ -978,7 +982,8 @@ static void r600_gfx_init(struct drm_device *dev,
978 R600_NUM_ES_STACK_ENTRIES(0)); 982 R600_NUM_ES_STACK_ENTRIES(0));
979 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 983 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
980 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 984 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
981 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { 985 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
986 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
982 /* no vertex cache */ 987 /* no vertex cache */
983 sq_config &= ~R600_VC_ENABLE; 988 sq_config &= ~R600_VC_ENABLE;
984 989
@@ -1035,7 +1040,8 @@ static void r600_gfx_init(struct drm_device *dev,
1035 1040
1036 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 1041 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
1037 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 1042 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
1038 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) 1043 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
1044 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
1039 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); 1045 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY));
1040 else 1046 else
1041 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); 1047 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC));
@@ -1078,6 +1084,7 @@ static void r600_gfx_init(struct drm_device *dev,
1078 break; 1084 break;
1079 case CHIP_RV610: 1085 case CHIP_RV610:
1080 case CHIP_RS780: 1086 case CHIP_RS780:
1087 case CHIP_RS880:
1081 case CHIP_RV620: 1088 case CHIP_RV620:
1082 gs_prim_buffer_depth = 32; 1089 gs_prim_buffer_depth = 32;
1083 break; 1090 break;
@@ -1123,6 +1130,7 @@ static void r600_gfx_init(struct drm_device *dev,
1123 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1130 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1124 case CHIP_RV610: 1131 case CHIP_RV610:
1125 case CHIP_RS780: 1132 case CHIP_RS780:
1133 case CHIP_RS880:
1126 case CHIP_RV620: 1134 case CHIP_RV620:
1127 tc_cntl = R600_TC_L2_SIZE(8); 1135 tc_cntl = R600_TC_L2_SIZE(8);
1128 break; 1136 break;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d61f2fc61df5..b1d945b8ed6c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -64,6 +64,7 @@ extern int radeon_agpmode;
64extern int radeon_vram_limit; 64extern int radeon_vram_limit;
65extern int radeon_gart_size; 65extern int radeon_gart_size;
66extern int radeon_benchmarking; 66extern int radeon_benchmarking;
67extern int radeon_testing;
67extern int radeon_connector_table; 68extern int radeon_connector_table;
68 69
69/* 70/*
@@ -113,6 +114,7 @@ enum radeon_family {
113 CHIP_RV770, 114 CHIP_RV770,
114 CHIP_RV730, 115 CHIP_RV730,
115 CHIP_RV710, 116 CHIP_RV710,
117 CHIP_RS880,
116 CHIP_LAST, 118 CHIP_LAST,
117}; 119};
118 120
@@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev);
201struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); 203struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
202void radeon_fence_unref(struct radeon_fence **fence); 204void radeon_fence_unref(struct radeon_fence **fence);
203 205
206/*
207 * Tiling registers
208 */
209struct radeon_surface_reg {
210 struct radeon_object *robj;
211};
212
213#define RADEON_GEM_MAX_SURFACES 8
204 214
205/* 215/*
206 * Radeon buffer. 216 * Radeon buffer.
@@ -213,6 +223,7 @@ struct radeon_object_list {
213 uint64_t gpu_offset; 223 uint64_t gpu_offset;
214 unsigned rdomain; 224 unsigned rdomain;
215 unsigned wdomain; 225 unsigned wdomain;
226 uint32_t tiling_flags;
216}; 227};
217 228
218int radeon_object_init(struct radeon_device *rdev); 229int radeon_object_init(struct radeon_device *rdev);
@@ -242,8 +253,15 @@ void radeon_object_list_clean(struct list_head *head);
242int radeon_object_fbdev_mmap(struct radeon_object *robj, 253int radeon_object_fbdev_mmap(struct radeon_object *robj,
243 struct vm_area_struct *vma); 254 struct vm_area_struct *vma);
244unsigned long radeon_object_size(struct radeon_object *robj); 255unsigned long radeon_object_size(struct radeon_object *robj);
245 256void radeon_object_clear_surface_reg(struct radeon_object *robj);
246 257int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
258 bool force_drop);
259void radeon_object_set_tiling_flags(struct radeon_object *robj,
260 uint32_t tiling_flags, uint32_t pitch);
261void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
262void radeon_bo_move_notify(struct ttm_buffer_object *bo,
263 struct ttm_mem_reg *mem);
264void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
247/* 265/*
248 * GEM objects. 266 * GEM objects.
249 */ 267 */
@@ -315,8 +333,11 @@ struct radeon_mc {
315 unsigned gtt_location; 333 unsigned gtt_location;
316 unsigned gtt_size; 334 unsigned gtt_size;
317 unsigned vram_location; 335 unsigned vram_location;
318 unsigned vram_size; 336 /* for some chips with <= 32MB we need to lie
337 * about vram size near mc fb location */
338 unsigned mc_vram_size;
319 unsigned vram_width; 339 unsigned vram_width;
340 unsigned real_vram_size;
320 int vram_mtrr; 341 int vram_mtrr;
321 bool vram_is_ddr; 342 bool vram_is_ddr;
322}; 343};
@@ -474,6 +495,39 @@ struct radeon_wb {
474 uint64_t gpu_addr; 495 uint64_t gpu_addr;
475}; 496};
476 497
498/**
 499 * struct radeon_pm - power management data
500 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
501 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880)
502 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880)
503 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880)
504 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
505 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
506 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
507 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
508 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 509 * @sclk: GPU clock Mhz (core bandwidth depends on this clock)
510 * @needed_bandwidth: current bandwidth needs
511 *
 512 * It keeps track of various data needed to make power management decisions.
 513 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
 514 * The equation between GPU/memory clock and available bandwidth is hw dependent
515 * (type of memory, bus size, efficiency, ...)
516 */
517struct radeon_pm {
518 fixed20_12 max_bandwidth;
519 fixed20_12 igp_sideport_mclk;
520 fixed20_12 igp_system_mclk;
521 fixed20_12 igp_ht_link_clk;
522 fixed20_12 igp_ht_link_width;
523 fixed20_12 k8_bandwidth;
524 fixed20_12 sideport_bandwidth;
525 fixed20_12 ht_bandwidth;
526 fixed20_12 core_bandwidth;
527 fixed20_12 sclk;
528 fixed20_12 needed_bandwidth;
529};
530
477 531
478/* 532/*
479 * Benchmarking 533 * Benchmarking
@@ -482,6 +536,12 @@ void radeon_benchmark(struct radeon_device *rdev);
482 536
483 537
484/* 538/*
539 * Testing
540 */
541void radeon_test_moves(struct radeon_device *rdev);
542
543
544/*
485 * Debugfs 545 * Debugfs
486 */ 546 */
487int radeon_debugfs_add_files(struct radeon_device *rdev, 547int radeon_debugfs_add_files(struct radeon_device *rdev,
@@ -535,6 +595,11 @@ struct radeon_asic {
535 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); 595 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
536 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 596 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
537 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 597 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
598 int (*set_surface_reg)(struct radeon_device *rdev, int reg,
599 uint32_t tiling_flags, uint32_t pitch,
600 uint32_t offset, uint32_t obj_size);
601 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
602 void (*bandwidth_update)(struct radeon_device *rdev);
538}; 603};
539 604
540union radeon_asic_config { 605union radeon_asic_config {
@@ -566,6 +631,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
566int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 631int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
567 struct drm_file *filp); 632 struct drm_file *filp);
568int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 633int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
634int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
635 struct drm_file *filp);
636int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
637 struct drm_file *filp);
569 638
570 639
571/* 640/*
@@ -594,8 +663,8 @@ struct radeon_device {
594 struct radeon_object *fbdev_robj; 663 struct radeon_object *fbdev_robj;
595 struct radeon_framebuffer *fbdev_rfb; 664 struct radeon_framebuffer *fbdev_rfb;
596 /* Register mmio */ 665 /* Register mmio */
597 unsigned long rmmio_base; 666 resource_size_t rmmio_base;
598 unsigned long rmmio_size; 667 resource_size_t rmmio_size;
599 void *rmmio; 668 void *rmmio;
600 radeon_rreg_t mm_rreg; 669 radeon_rreg_t mm_rreg;
601 radeon_wreg_t mm_wreg; 670 radeon_wreg_t mm_wreg;
@@ -619,11 +688,14 @@ struct radeon_device {
619 struct radeon_irq irq; 688 struct radeon_irq irq;
620 struct radeon_asic *asic; 689 struct radeon_asic *asic;
621 struct radeon_gem gem; 690 struct radeon_gem gem;
691 struct radeon_pm pm;
622 struct mutex cs_mutex; 692 struct mutex cs_mutex;
623 struct radeon_wb wb; 693 struct radeon_wb wb;
624 bool gpu_lockup; 694 bool gpu_lockup;
625 bool shutdown; 695 bool shutdown;
626 bool suspend; 696 bool suspend;
697 bool need_dma32;
698 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
627}; 699};
628 700
629int radeon_device_init(struct radeon_device *rdev, 701int radeon_device_init(struct radeon_device *rdev,
@@ -670,6 +742,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
670/* 742/*
671 * ASICs helpers. 743 * ASICs helpers.
672 */ 744 */
745#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
746 (rdev->pdev->device == 0x5969))
673#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ 747#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
674 (rdev->family == CHIP_RV200) || \ 748 (rdev->family == CHIP_RV200) || \
675 (rdev->family == CHIP_RS100) || \ 749 (rdev->family == CHIP_RS100) || \
@@ -796,5 +870,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
796#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) 870#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
797#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) 871#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
798#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) 872#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
873#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
874#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
875#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
799 876
800#endif 877#endif
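The new set_surface_reg/clear_surface_reg/bandwidth_update macros follow the existing pattern in this header: each wraps an indirect call through the per-ASIC function table, so generic code stays family-agnostic and each chip family only fills in the hooks it supports. A small standalone sketch of the same dispatch idea (all names here are illustrative, not the driver's):

    #include <stdio.h>

    struct dev;

    struct asic_ops {
        int  (*set_surface_reg)(struct dev *d, int reg);
        void (*bandwidth_update)(struct dev *d);
    };

    struct dev {
        const struct asic_ops *asic;
    };

    /* generic code only ever goes through the table */
    #define dev_set_surface_reg(d, r)   ((d)->asic->set_surface_reg((d), (r)))
    #define dev_bandwidth_update(d)     ((d)->asic->bandwidth_update((d)))

    static int  r100_like_set_surface_reg(struct dev *d, int reg) { (void)d; printf("surface %d\n", reg); return 0; }
    static void r100_like_bandwidth_update(struct dev *d)         { (void)d; printf("bandwidth update\n"); }

    static const struct asic_ops r100_like_ops = {
        .set_surface_reg  = r100_like_set_surface_reg,
        .bandwidth_update = r100_like_bandwidth_update,
    };

    int main(void)
    {
        struct dev d = { .asic = &r100_like_ops };

        dev_set_surface_reg(&d, 0);
        dev_bandwidth_update(&d);
        return 0;
    }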
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e2e567395df8..9a75876e0c3b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -71,6 +71,11 @@ int r100_copy_blit(struct radeon_device *rdev,
71 uint64_t dst_offset, 71 uint64_t dst_offset,
72 unsigned num_pages, 72 unsigned num_pages,
73 struct radeon_fence *fence); 73 struct radeon_fence *fence);
74int r100_set_surface_reg(struct radeon_device *rdev, int reg,
75 uint32_t tiling_flags, uint32_t pitch,
76 uint32_t offset, uint32_t obj_size);
77int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
78void r100_bandwidth_update(struct radeon_device *rdev);
74 79
75static struct radeon_asic r100_asic = { 80static struct radeon_asic r100_asic = {
76 .init = &r100_init, 81 .init = &r100_init,
@@ -100,6 +105,9 @@ static struct radeon_asic r100_asic = {
100 .set_memory_clock = NULL, 105 .set_memory_clock = NULL,
101 .set_pcie_lanes = NULL, 106 .set_pcie_lanes = NULL,
102 .set_clock_gating = &radeon_legacy_set_clock_gating, 107 .set_clock_gating = &radeon_legacy_set_clock_gating,
108 .set_surface_reg = r100_set_surface_reg,
109 .clear_surface_reg = r100_clear_surface_reg,
110 .bandwidth_update = &r100_bandwidth_update,
103}; 111};
104 112
105 113
@@ -128,6 +136,7 @@ int r300_copy_dma(struct radeon_device *rdev,
128 uint64_t dst_offset, 136 uint64_t dst_offset,
129 unsigned num_pages, 137 unsigned num_pages,
130 struct radeon_fence *fence); 138 struct radeon_fence *fence);
139
131static struct radeon_asic r300_asic = { 140static struct radeon_asic r300_asic = {
132 .init = &r300_init, 141 .init = &r300_init,
133 .errata = &r300_errata, 142 .errata = &r300_errata,
@@ -156,6 +165,9 @@ static struct radeon_asic r300_asic = {
156 .set_memory_clock = NULL, 165 .set_memory_clock = NULL,
157 .set_pcie_lanes = &rv370_set_pcie_lanes, 166 .set_pcie_lanes = &rv370_set_pcie_lanes,
158 .set_clock_gating = &radeon_legacy_set_clock_gating, 167 .set_clock_gating = &radeon_legacy_set_clock_gating,
168 .set_surface_reg = r100_set_surface_reg,
169 .clear_surface_reg = r100_clear_surface_reg,
170 .bandwidth_update = &r100_bandwidth_update,
159}; 171};
160 172
161/* 173/*
@@ -193,6 +205,9 @@ static struct radeon_asic r420_asic = {
193 .set_memory_clock = &radeon_atom_set_memory_clock, 205 .set_memory_clock = &radeon_atom_set_memory_clock,
194 .set_pcie_lanes = &rv370_set_pcie_lanes, 206 .set_pcie_lanes = &rv370_set_pcie_lanes,
195 .set_clock_gating = &radeon_atom_set_clock_gating, 207 .set_clock_gating = &radeon_atom_set_clock_gating,
208 .set_surface_reg = r100_set_surface_reg,
209 .clear_surface_reg = r100_clear_surface_reg,
210 .bandwidth_update = &r100_bandwidth_update,
196}; 211};
197 212
198 213
@@ -237,6 +252,9 @@ static struct radeon_asic rs400_asic = {
237 .set_memory_clock = NULL, 252 .set_memory_clock = NULL,
238 .set_pcie_lanes = NULL, 253 .set_pcie_lanes = NULL,
239 .set_clock_gating = &radeon_legacy_set_clock_gating, 254 .set_clock_gating = &radeon_legacy_set_clock_gating,
255 .set_surface_reg = r100_set_surface_reg,
256 .clear_surface_reg = r100_clear_surface_reg,
257 .bandwidth_update = &r100_bandwidth_update,
240}; 258};
241 259
242 260
@@ -254,6 +272,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev);
254int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 272int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
255uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 273uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
256void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 274void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
275void rs600_bandwidth_update(struct radeon_device *rdev);
257static struct radeon_asic rs600_asic = { 276static struct radeon_asic rs600_asic = {
258 .init = &r300_init, 277 .init = &r300_init,
259 .errata = &rs600_errata, 278 .errata = &rs600_errata,
@@ -282,6 +301,7 @@ static struct radeon_asic rs600_asic = {
282 .set_memory_clock = &radeon_atom_set_memory_clock, 301 .set_memory_clock = &radeon_atom_set_memory_clock,
283 .set_pcie_lanes = NULL, 302 .set_pcie_lanes = NULL,
284 .set_clock_gating = &radeon_atom_set_clock_gating, 303 .set_clock_gating = &radeon_atom_set_clock_gating,
304 .bandwidth_update = &rs600_bandwidth_update,
285}; 305};
286 306
287 307
@@ -294,6 +314,7 @@ int rs690_mc_init(struct radeon_device *rdev);
294void rs690_mc_fini(struct radeon_device *rdev); 314void rs690_mc_fini(struct radeon_device *rdev);
295uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); 315uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
296void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 316void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
317void rs690_bandwidth_update(struct radeon_device *rdev);
297static struct radeon_asic rs690_asic = { 318static struct radeon_asic rs690_asic = {
298 .init = &r300_init, 319 .init = &r300_init,
299 .errata = &rs690_errata, 320 .errata = &rs690_errata,
@@ -322,6 +343,9 @@ static struct radeon_asic rs690_asic = {
322 .set_memory_clock = &radeon_atom_set_memory_clock, 343 .set_memory_clock = &radeon_atom_set_memory_clock,
323 .set_pcie_lanes = NULL, 344 .set_pcie_lanes = NULL,
324 .set_clock_gating = &radeon_atom_set_clock_gating, 345 .set_clock_gating = &radeon_atom_set_clock_gating,
346 .set_surface_reg = r100_set_surface_reg,
347 .clear_surface_reg = r100_clear_surface_reg,
348 .bandwidth_update = &rs690_bandwidth_update,
325}; 349};
326 350
327 351
@@ -339,6 +363,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
339void rv515_ring_start(struct radeon_device *rdev); 363void rv515_ring_start(struct radeon_device *rdev);
340uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 364uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
341void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 365void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
366void rv515_bandwidth_update(struct radeon_device *rdev);
342static struct radeon_asic rv515_asic = { 367static struct radeon_asic rv515_asic = {
343 .init = &rv515_init, 368 .init = &rv515_init,
344 .errata = &rv515_errata, 369 .errata = &rv515_errata,
@@ -367,6 +392,9 @@ static struct radeon_asic rv515_asic = {
367 .set_memory_clock = &radeon_atom_set_memory_clock, 392 .set_memory_clock = &radeon_atom_set_memory_clock,
368 .set_pcie_lanes = &rv370_set_pcie_lanes, 393 .set_pcie_lanes = &rv370_set_pcie_lanes,
369 .set_clock_gating = &radeon_atom_set_clock_gating, 394 .set_clock_gating = &radeon_atom_set_clock_gating,
395 .set_surface_reg = r100_set_surface_reg,
396 .clear_surface_reg = r100_clear_surface_reg,
397 .bandwidth_update = &rv515_bandwidth_update,
370}; 398};
371 399
372 400
@@ -377,6 +405,7 @@ void r520_errata(struct radeon_device *rdev);
377void r520_vram_info(struct radeon_device *rdev); 405void r520_vram_info(struct radeon_device *rdev);
378int r520_mc_init(struct radeon_device *rdev); 406int r520_mc_init(struct radeon_device *rdev);
379void r520_mc_fini(struct radeon_device *rdev); 407void r520_mc_fini(struct radeon_device *rdev);
408void r520_bandwidth_update(struct radeon_device *rdev);
380static struct radeon_asic r520_asic = { 409static struct radeon_asic r520_asic = {
381 .init = &rv515_init, 410 .init = &rv515_init,
382 .errata = &r520_errata, 411 .errata = &r520_errata,
@@ -405,6 +434,9 @@ static struct radeon_asic r520_asic = {
405 .set_memory_clock = &radeon_atom_set_memory_clock, 434 .set_memory_clock = &radeon_atom_set_memory_clock,
406 .set_pcie_lanes = &rv370_set_pcie_lanes, 435 .set_pcie_lanes = &rv370_set_pcie_lanes,
407 .set_clock_gating = &radeon_atom_set_clock_gating, 436 .set_clock_gating = &radeon_atom_set_clock_gating,
437 .set_surface_reg = r100_set_surface_reg,
438 .clear_surface_reg = r100_clear_surface_reg,
439 .bandwidth_update = &r520_bandwidth_update,
408}; 440};
409 441
410/* 442/*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1f5a1a490984..fcfe5c02d744 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
103static bool radeon_atom_apply_quirks(struct drm_device *dev, 103static bool radeon_atom_apply_quirks(struct drm_device *dev,
104 uint32_t supported_device, 104 uint32_t supported_device,
105 int *connector_type, 105 int *connector_type,
106 struct radeon_i2c_bus_rec *i2c_bus) 106 struct radeon_i2c_bus_rec *i2c_bus,
107 uint8_t *line_mux)
107{ 108{
108 109
109 /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ 110 /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
127 if ((dev->pdev->device == 0x5653) && 128 if ((dev->pdev->device == 0x5653) &&
128 (dev->pdev->subsystem_vendor == 0x1462) && 129 (dev->pdev->subsystem_vendor == 0x1462) &&
129 (dev->pdev->subsystem_device == 0x0291)) { 130 (dev->pdev->subsystem_device == 0x0291)) {
130 if (*connector_type == DRM_MODE_CONNECTOR_LVDS) 131 if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
131 i2c_bus->valid = false; 132 i2c_bus->valid = false;
133 *line_mux = 53;
134 }
132 } 135 }
133 136
134 /* Funky macbooks */ 137 /* Funky macbooks */
@@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
526 529
527 if (!radeon_atom_apply_quirks 530 if (!radeon_atom_apply_quirks
528 (dev, (1 << i), &bios_connectors[i].connector_type, 531 (dev, (1 << i), &bios_connectors[i].connector_type,
529 &bios_connectors[i].ddc_bus)) 532 &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
530 continue; 533 continue;
531 534
532 bios_connectors[i].valid = true; 535 bios_connectors[i].valid = true;
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index c44403a2ca76..2e938f7496fb 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
63 if (r) { 63 if (r) {
64 goto out_cleanup; 64 goto out_cleanup;
65 } 65 }
66 r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence); 66 r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence);
67 if (r) { 67 if (r) {
68 goto out_cleanup; 68 goto out_cleanup;
69 } 69 }
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
88 if (r) { 88 if (r) {
89 goto out_cleanup; 89 goto out_cleanup;
90 } 90 }
91 r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence); 91 r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence);
92 if (r) { 92 if (r) {
93 goto out_cleanup; 93 goto out_cleanup;
94 } 94 }
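The benchmark fix replaces `size >> 14` with `size / 4096`: the copy helpers take a page count, and assuming 4 KiB GPU pages a right shift by 14 divides by 16384 and so under-reports the transfer by a factor of four. For example, with a 1 MiB buffer: 1048576 >> 14 = 64, whereas 1048576 / 4096 = 256 pages.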
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index b843f9bdfb14..a169067efc4e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
127 sizeof(struct drm_radeon_cs_chunk))) { 127 sizeof(struct drm_radeon_cs_chunk))) {
128 return -EFAULT; 128 return -EFAULT;
129 } 129 }
130 p->chunks[i].length_dw = user_chunk.length_dw;
131 p->chunks[i].kdata = NULL;
130 p->chunks[i].chunk_id = user_chunk.chunk_id; 132 p->chunks[i].chunk_id = user_chunk.chunk_id;
133
131 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { 134 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
132 p->chunk_relocs_idx = i; 135 p->chunk_relocs_idx = i;
133 } 136 }
134 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 137 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
135 p->chunk_ib_idx = i; 138 p->chunk_ib_idx = i;
139 /* zero length IB isn't useful */
140 if (p->chunks[i].length_dw == 0)
141 return -EINVAL;
136 } 142 }
143
137 p->chunks[i].length_dw = user_chunk.length_dw; 144 p->chunks[i].length_dw = user_chunk.length_dw;
138 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 145 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
139 146
140 p->chunks[i].kdata = NULL;
141 size = p->chunks[i].length_dw * sizeof(uint32_t); 147 size = p->chunks[i].length_dw * sizeof(uint32_t);
142 p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); 148 p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
143 if (p->chunks[i].kdata == NULL) { 149 if (p->chunks[i].kdata == NULL) {
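The reordering in radeon_cs_parser_init() copies length_dw out of the user chunk before the chunk-id checks and rejects a zero-length IB outright, so the later kzalloc/parse steps never see an empty IB. A minimal sketch of the same validate-early pattern (struct and field names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    struct user_chunk { uint32_t chunk_id, length_dw; };

    enum { CHUNK_ID_IB = 1, CHUNK_ID_RELOCS = 2 };

    static int init_chunk(const struct user_chunk *uc)
    {
        uint32_t length_dw = uc->length_dw;          /* capture the size first */

        if (uc->chunk_id == CHUNK_ID_IB && length_dw == 0)
            return -1;                               /* zero-length IB isn't useful */

        printf("chunk %u: %u dwords\n", uc->chunk_id, length_dw);
        return 0;
    }

    int main(void)
    {
        struct user_chunk bad = { CHUNK_ID_IB, 0 };
        struct user_chunk ok  = { CHUNK_ID_IB, 16 };

        printf("bad -> %d, ok -> %d\n", init_chunk(&bad), init_chunk(&ok));
        return 0;
    }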
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 5232441f119b..b13c79e38bc0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
111 111
112 if (ASIC_IS_AVIVO(rdev)) 112 if (ASIC_IS_AVIVO(rdev))
113 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); 113 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
114 else 114 else {
115 radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
115 /* offset is from DISP(2)_BASE_ADDRESS */ 116 /* offset is from DISP(2)_BASE_ADDRESS */
116 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr); 117 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
118 }
117} 119}
118 120
119int radeon_crtc_cursor_set(struct drm_crtc *crtc, 121int radeon_crtc_cursor_set(struct drm_crtc *crtc,
@@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
245 (RADEON_CUR_LOCK 247 (RADEON_CUR_LOCK
246 | ((xorigin ? 0 : x) << 16) 248 | ((xorigin ? 0 : x) << 16)
247 | (yorigin ? 0 : y))); 249 | (yorigin ? 0 : y)));
250 /* offset is from DISP(2)_BASE_ADDRESS */
251 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
252 (yorigin * 256)));
248 } 253 }
249 radeon_lock_cursor(crtc, false); 254 radeon_lock_cursor(crtc, false);
250 255
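On pre-AVIVO parts RADEON_CUR_OFFSET is relative to DISP(2)_BASE_ADDRESS rather than an absolute GPU address, so the cursor-set path now stores gpu_addr minus the legacy display base, and the move path re-adds yorigin * 256 so the programmed offset skips the rows clipped off the top of the screen. A small worked sketch with assumed addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t display_base = 0x00200000;   /* assumed DISP_BASE_ADDRESS */
        uint64_t cursor_gpu   = 0x00240000;   /* assumed cursor BO address  */
        uint32_t yorigin      = 5;            /* cursor clipped 5 rows above the top edge */

        uint32_t cur_offset = (uint32_t)(cursor_gpu - display_base);
        printf("CUR_OFFSET = 0x%x (0x%x when clipped)\n",
               cur_offset, cur_offset + yorigin * 256);
        return 0;
    }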
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f30aa7274a54..9ff6dcb97f9d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -35,6 +35,25 @@
35#include "atom.h" 35#include "atom.h"
36 36
37/* 37/*
38 * Clear GPU surface registers.
39 */
40static void radeon_surface_init(struct radeon_device *rdev)
41{
42 /* FIXME: check this out */
43 if (rdev->family < CHIP_R600) {
44 int i;
45
46 for (i = 0; i < 8; i++) {
47 WREG32(RADEON_SURFACE0_INFO +
48 i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
49 0);
50 }
51 /* enable surfaces */
52 WREG32(RADEON_SURFACE_CNTL, 0);
53 }
54}
55
56/*
38 * GPU scratch registers helpers function. 57 * GPU scratch registers helpers function.
39 */ 58 */
40static void radeon_scratch_init(struct radeon_device *rdev) 59static void radeon_scratch_init(struct radeon_device *rdev)
@@ -102,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev)
102 if (rdev->mc.vram_location != 0xFFFFFFFFUL) { 121 if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
103 /* vram location was already setup try to put gtt after 122 /* vram location was already setup try to put gtt after
104 * if it fits */ 123 * if it fits */
105 tmp = rdev->mc.vram_location + rdev->mc.vram_size; 124 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
106 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); 125 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
107 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { 126 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
108 rdev->mc.gtt_location = tmp; 127 rdev->mc.gtt_location = tmp;
@@ -117,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev)
117 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { 136 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
118 /* gtt location was already setup try to put vram before 137 /* gtt location was already setup try to put vram before
119 * if it fits */ 138 * if it fits */
120 if (rdev->mc.vram_size < rdev->mc.gtt_location) { 139 if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
121 rdev->mc.vram_location = 0; 140 rdev->mc.vram_location = 0;
122 } else { 141 } else {
123 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; 142 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
124 tmp += (rdev->mc.vram_size - 1); 143 tmp += (rdev->mc.mc_vram_size - 1);
125 tmp &= ~(rdev->mc.vram_size - 1); 144 tmp &= ~(rdev->mc.mc_vram_size - 1);
126 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { 145 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
127 rdev->mc.vram_location = tmp; 146 rdev->mc.vram_location = tmp;
128 } else { 147 } else {
129 printk(KERN_ERR "[drm] vram too big to fit " 148 printk(KERN_ERR "[drm] vram too big to fit "
@@ -133,12 +152,16 @@ int radeon_mc_setup(struct radeon_device *rdev)
133 } 152 }
134 } else { 153 } else {
135 rdev->mc.vram_location = 0; 154 rdev->mc.vram_location = 0;
136 rdev->mc.gtt_location = rdev->mc.vram_size; 155 tmp = rdev->mc.mc_vram_size;
156 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
157 rdev->mc.gtt_location = tmp;
137 } 158 }
138 DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); 159 DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20);
139 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", 160 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
140 rdev->mc.vram_location, 161 rdev->mc.vram_location,
141 rdev->mc.vram_location + rdev->mc.vram_size - 1); 162 rdev->mc.vram_location + rdev->mc.mc_vram_size - 1);
163 if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size)
164 DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
142 DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); 165 DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
143 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", 166 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
144 rdev->mc.gtt_location, 167 rdev->mc.gtt_location,
@@ -433,6 +456,7 @@ int radeon_device_init(struct radeon_device *rdev,
433 uint32_t flags) 456 uint32_t flags)
434{ 457{
435 int r, ret; 458 int r, ret;
459 int dma_bits;
436 460
437 DRM_INFO("radeon: Initializing kernel modesetting.\n"); 461 DRM_INFO("radeon: Initializing kernel modesetting.\n");
438 rdev->shutdown = false; 462 rdev->shutdown = false;
@@ -475,8 +499,20 @@ int radeon_device_init(struct radeon_device *rdev,
475 return r; 499 return r;
476 } 500 }
477 501
478 /* Report DMA addressing limitation */ 502 /* set DMA mask + need_dma32 flags.
479 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); 503 * PCIE - can handle 40-bits.
504 * IGP - can handle 40-bits (in theory)
505 * AGP - generally dma32 is safest
506 * PCI - only dma32
507 */
508 rdev->need_dma32 = false;
509 if (rdev->flags & RADEON_IS_AGP)
510 rdev->need_dma32 = true;
511 if (rdev->flags & RADEON_IS_PCI)
512 rdev->need_dma32 = true;
513
514 dma_bits = rdev->need_dma32 ? 32 : 40;
515 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
480 if (r) { 516 if (r) {
481 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 517 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
482 } 518 }
@@ -496,6 +532,8 @@ int radeon_device_init(struct radeon_device *rdev,
496 radeon_errata(rdev); 532 radeon_errata(rdev);
497 /* Initialize scratch registers */ 533 /* Initialize scratch registers */
498 radeon_scratch_init(rdev); 534 radeon_scratch_init(rdev);
535 /* Initialize surface registers */
536 radeon_surface_init(rdev);
499 537
500 /* TODO: disable VGA need to use VGA request */ 538 /* TODO: disable VGA need to use VGA request */
501 /* BIOS*/ 539 /* BIOS*/
@@ -527,27 +565,22 @@ int radeon_device_init(struct radeon_device *rdev,
527 radeon_combios_asic_init(rdev->ddev); 565 radeon_combios_asic_init(rdev->ddev);
528 } 566 }
529 } 567 }
568 /* Initialize clocks */
569 r = radeon_clocks_init(rdev);
570 if (r) {
571 return r;
572 }
530 /* Get vram informations */ 573 /* Get vram informations */
531 radeon_vram_info(rdev); 574 radeon_vram_info(rdev);
532 /* Device is severly broken if aper size > vram size. 575
533 * for RN50/M6/M7 - Novell bug 204882 ?
534 */
535 if (rdev->mc.vram_size < rdev->mc.aper_size) {
536 rdev->mc.aper_size = rdev->mc.vram_size;
537 }
538 /* Add an MTRR for the VRAM */ 576 /* Add an MTRR for the VRAM */
539 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 577 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
540 MTRR_TYPE_WRCOMB, 1); 578 MTRR_TYPE_WRCOMB, 1);
541 DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", 579 DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
542 rdev->mc.vram_size >> 20, 580 rdev->mc.real_vram_size >> 20,
543 (unsigned)rdev->mc.aper_size >> 20); 581 (unsigned)rdev->mc.aper_size >> 20);
544 DRM_INFO("RAM width %dbits %cDR\n", 582 DRM_INFO("RAM width %dbits %cDR\n",
545 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); 583 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
546 /* Initialize clocks */
547 r = radeon_clocks_init(rdev);
548 if (r) {
549 return r;
550 }
551 /* Initialize memory controller (also test AGP) */ 584 /* Initialize memory controller (also test AGP) */
552 r = radeon_mc_init(rdev); 585 r = radeon_mc_init(rdev);
553 if (r) { 586 if (r) {
@@ -604,12 +637,12 @@ int radeon_device_init(struct radeon_device *rdev,
604 if (r) { 637 if (r) {
605 return r; 638 return r;
606 } 639 }
607 if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
608 rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
609 }
610 if (!ret) { 640 if (!ret) {
611 DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); 641 DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
612 } 642 }
643 if (radeon_testing) {
644 radeon_test_moves(rdev);
645 }
613 if (radeon_benchmarking) { 646 if (radeon_benchmarking) {
614 radeon_benchmark(rdev); 647 radeon_benchmark(rdev);
615 } 648 }
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3efcf1a526be..a8fa1bb84cf7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
187 187
188 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); 188 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
189 radeon_crtc->crtc_id = index; 189 radeon_crtc->crtc_id = index;
190 rdev->mode_info.crtcs[index] = radeon_crtc;
190 191
191 radeon_crtc->mode_set.crtc = &radeon_crtc->base; 192 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
192 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); 193 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
@@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll,
491 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 492 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
492 current_freq = radeon_div(tmp, ref_div * post_div); 493 current_freq = radeon_div(tmp, ref_div * post_div);
493 494
494 error = abs(current_freq - freq); 495 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
496 error = freq - current_freq;
497 error = error < 0 ? 0xffffffff : error;
498 } else
499 error = abs(current_freq - freq);
495 vco_diff = abs(vco - best_vco); 500 vco_diff = abs(vco - best_vco);
496 501
497 if ((best_vco == 0 && error < best_error) || 502 if ((best_vco == 0 && error < best_error) ||
@@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev)
657 } 662 }
658} 663}
659 664
660void radeon_init_disp_bandwidth(struct drm_device *dev) 665bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
666 struct drm_display_mode *mode,
667 struct drm_display_mode *adjusted_mode)
661{ 668{
662 struct radeon_device *rdev = dev->dev_private; 669 struct drm_device *dev = crtc->dev;
663 struct drm_display_mode *modes[2]; 670 struct drm_encoder *encoder;
664 int pixel_bytes[2]; 671 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
665 struct drm_crtc *crtc; 672 struct radeon_encoder *radeon_encoder;
666 673 bool first = true;
667 pixel_bytes[0] = pixel_bytes[1] = 0;
668 modes[0] = modes[1] = NULL;
669
670 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
671 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
672 674
673 if (crtc->enabled && crtc->fb) { 675 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
674 modes[radeon_crtc->crtc_id] = &crtc->mode; 676 radeon_encoder = to_radeon_encoder(encoder);
675 pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8; 677 if (encoder->crtc != crtc)
678 continue;
679 if (first) {
680 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
681 radeon_crtc->devices = radeon_encoder->devices;
682 memcpy(&radeon_crtc->native_mode,
683 &radeon_encoder->native_mode,
684 sizeof(struct radeon_native_mode));
685 first = false;
686 } else {
687 if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
688 /* WARNING: Right now this can't happen but
689 * in the future we need to check that scaling
 690 * is consistent across different encoders
 691 * (i.e. all encoders can work with the same
692 * scaling).
693 */
694 DRM_ERROR("Scaling not consistent accross encoder.\n");
695 return false;
696 }
676 } 697 }
677 } 698 }
678 699 if (radeon_crtc->rmx_type != RMX_OFF) {
679 if (ASIC_IS_AVIVO(rdev)) { 700 fixed20_12 a, b;
680 radeon_init_disp_bw_avivo(dev, 701 a.full = rfixed_const(crtc->mode.vdisplay);
681 modes[0], 702 b.full = rfixed_const(radeon_crtc->native_mode.panel_xres);
682 pixel_bytes[0], 703 radeon_crtc->vsc.full = rfixed_div(a, b);
683 modes[1], 704 a.full = rfixed_const(crtc->mode.hdisplay);
684 pixel_bytes[1]); 705 b.full = rfixed_const(radeon_crtc->native_mode.panel_yres);
706 radeon_crtc->hsc.full = rfixed_div(a, b);
685 } else { 707 } else {
686 radeon_init_disp_bw_legacy(dev, 708 radeon_crtc->vsc.full = rfixed_const(1);
687 modes[0], 709 radeon_crtc->hsc.full = rfixed_const(1);
688 pixel_bytes[0],
689 modes[1],
690 pixel_bytes[1]);
691 } 710 }
711 return true;
692} 712}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 09c9fb9f6210..0bd5879a4957 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -89,6 +89,7 @@ int radeon_agpmode = 0;
89int radeon_vram_limit = 0; 89int radeon_vram_limit = 0;
90int radeon_gart_size = 512; /* default gart size */ 90int radeon_gart_size = 512; /* default gart size */
91int radeon_benchmarking = 0; 91int radeon_benchmarking = 0;
92int radeon_testing = 0;
92int radeon_connector_table = 0; 93int radeon_connector_table = 0;
93#endif 94#endif
94 95
@@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600);
117MODULE_PARM_DESC(benchmark, "Run benchmark"); 118MODULE_PARM_DESC(benchmark, "Run benchmark");
118module_param_named(benchmark, radeon_benchmarking, int, 0444); 119module_param_named(benchmark, radeon_benchmarking, int, 0444);
119 120
121MODULE_PARM_DESC(test, "Run tests");
122module_param_named(test, radeon_testing, int, 0444);
123
120MODULE_PARM_DESC(connector_table, "Force connector table"); 124MODULE_PARM_DESC(connector_table, "Force connector table");
121module_param_named(connector_table, radeon_connector_table, int, 0444); 125module_param_named(connector_table, radeon_connector_table, int, 0444);
122#endif 126#endif
@@ -314,6 +318,14 @@ static int __init radeon_init(void)
314 driver = &driver_old; 318 driver = &driver_old;
315 driver->num_ioctls = radeon_max_ioctl; 319 driver->num_ioctls = radeon_max_ioctl;
316#if defined(CONFIG_DRM_RADEON_KMS) 320#if defined(CONFIG_DRM_RADEON_KMS)
321#ifdef CONFIG_VGA_CONSOLE
322 if (vgacon_text_force() && radeon_modeset == -1) {
323 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
324 driver = &driver_old;
325 driver->driver_features &= ~DRIVER_MODESET;
326 radeon_modeset = 0;
327 }
328#endif
317 /* if enabled by default */ 329 /* if enabled by default */
318 if (radeon_modeset == -1) { 330 if (radeon_modeset == -1) {
319 DRM_INFO("radeon default to kernel modesetting.\n"); 331 DRM_INFO("radeon default to kernel modesetting.\n");
@@ -325,17 +337,8 @@ static int __init radeon_init(void)
325 driver->driver_features |= DRIVER_MODESET; 337 driver->driver_features |= DRIVER_MODESET;
326 driver->num_ioctls = radeon_max_kms_ioctl; 338 driver->num_ioctls = radeon_max_kms_ioctl;
327 } 339 }
328
329 /* if the vga console setting is enabled still 340 /* if the vga console setting is enabled still
330 * let modprobe override it */ 341 * let modprobe override it */
331#ifdef CONFIG_VGA_CONSOLE
332 if (vgacon_text_force() && radeon_modeset == -1) {
333 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
334 driver = &driver_old;
335 driver->driver_features &= ~DRIVER_MODESET;
336 radeon_modeset = 0;
337 }
338#endif
339#endif 342#endif
340 return drm_init(driver); 343 return drm_init(driver);
341} 344}
@@ -345,7 +348,7 @@ static void __exit radeon_exit(void)
345 drm_exit(driver); 348 drm_exit(driver);
346} 349}
347 350
348late_initcall(radeon_init); 351module_init(radeon_init);
349module_exit(radeon_exit); 352module_exit(radeon_exit);
350 353
351MODULE_AUTHOR(DRIVER_AUTHOR); 354MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 127d0456f628..3933f8216a34 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -143,6 +143,7 @@ enum radeon_family {
143 CHIP_RV635, 143 CHIP_RV635,
144 CHIP_RV670, 144 CHIP_RV670,
145 CHIP_RS780, 145 CHIP_RS780,
146 CHIP_RS880,
146 CHIP_RV770, 147 CHIP_RV770,
147 CHIP_RV730, 148 CHIP_RV730,
148 CHIP_RV710, 149 CHIP_RV710,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c8ef0d14ffab..0a92706eac19 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -154,7 +154,6 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
154 154
155 if (mode->hdisplay < native_mode->panel_xres || 155 if (mode->hdisplay < native_mode->panel_xres ||
156 mode->vdisplay < native_mode->panel_yres) { 156 mode->vdisplay < native_mode->panel_yres) {
157 radeon_encoder->flags |= RADEON_USE_RMX;
158 if (ASIC_IS_AVIVO(rdev)) { 157 if (ASIC_IS_AVIVO(rdev)) {
159 adjusted_mode->hdisplay = native_mode->panel_xres; 158 adjusted_mode->hdisplay = native_mode->panel_xres;
160 adjusted_mode->vdisplay = native_mode->panel_yres; 159 adjusted_mode->vdisplay = native_mode->panel_yres;
@@ -197,15 +196,13 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
197 } 196 }
198} 197}
199 198
199
200static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 200static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
201 struct drm_display_mode *mode, 201 struct drm_display_mode *mode,
202 struct drm_display_mode *adjusted_mode) 202 struct drm_display_mode *adjusted_mode)
203{ 203{
204
205 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 204 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
206 205
207 radeon_encoder->flags &= ~RADEON_USE_RMX;
208
209 drm_mode_set_crtcinfo(adjusted_mode, 0); 206 drm_mode_set_crtcinfo(adjusted_mode, 0);
210 207
211 if (radeon_encoder->rmx_type != RMX_OFF) 208 if (radeon_encoder->rmx_type != RMX_OFF)
@@ -808,234 +805,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
808 805
809} 806}
810 807
811static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
812{
813
814 WREG32(0x659C, 0x0);
815 WREG32(0x6594, 0x705);
816 WREG32(0x65A4, 0x10001);
817 WREG32(0x65D8, 0x0);
818 WREG32(0x65B0, 0x0);
819 WREG32(0x65C0, 0x0);
820 WREG32(0x65D4, 0x0);
821 WREG32(0x6578, 0x0);
822 WREG32(0x657C, 0x841880A8);
823 WREG32(0x6578, 0x1);
824 WREG32(0x657C, 0x84208680);
825 WREG32(0x6578, 0x2);
826 WREG32(0x657C, 0xBFF880B0);
827 WREG32(0x6578, 0x100);
828 WREG32(0x657C, 0x83D88088);
829 WREG32(0x6578, 0x101);
830 WREG32(0x657C, 0x84608680);
831 WREG32(0x6578, 0x102);
832 WREG32(0x657C, 0xBFF080D0);
833 WREG32(0x6578, 0x200);
834 WREG32(0x657C, 0x83988068);
835 WREG32(0x6578, 0x201);
836 WREG32(0x657C, 0x84A08680);
837 WREG32(0x6578, 0x202);
838 WREG32(0x657C, 0xBFF080F8);
839 WREG32(0x6578, 0x300);
840 WREG32(0x657C, 0x83588058);
841 WREG32(0x6578, 0x301);
842 WREG32(0x657C, 0x84E08660);
843 WREG32(0x6578, 0x302);
844 WREG32(0x657C, 0xBFF88120);
845 WREG32(0x6578, 0x400);
846 WREG32(0x657C, 0x83188040);
847 WREG32(0x6578, 0x401);
848 WREG32(0x657C, 0x85008660);
849 WREG32(0x6578, 0x402);
850 WREG32(0x657C, 0xBFF88150);
851 WREG32(0x6578, 0x500);
852 WREG32(0x657C, 0x82D88030);
853 WREG32(0x6578, 0x501);
854 WREG32(0x657C, 0x85408640);
855 WREG32(0x6578, 0x502);
856 WREG32(0x657C, 0xBFF88180);
857 WREG32(0x6578, 0x600);
858 WREG32(0x657C, 0x82A08018);
859 WREG32(0x6578, 0x601);
860 WREG32(0x657C, 0x85808620);
861 WREG32(0x6578, 0x602);
862 WREG32(0x657C, 0xBFF081B8);
863 WREG32(0x6578, 0x700);
864 WREG32(0x657C, 0x82608010);
865 WREG32(0x6578, 0x701);
866 WREG32(0x657C, 0x85A08600);
867 WREG32(0x6578, 0x702);
868 WREG32(0x657C, 0x800081F0);
869 WREG32(0x6578, 0x800);
870 WREG32(0x657C, 0x8228BFF8);
871 WREG32(0x6578, 0x801);
872 WREG32(0x657C, 0x85E085E0);
873 WREG32(0x6578, 0x802);
874 WREG32(0x657C, 0xBFF88228);
875 WREG32(0x6578, 0x10000);
876 WREG32(0x657C, 0x82A8BF00);
877 WREG32(0x6578, 0x10001);
878 WREG32(0x657C, 0x82A08CC0);
879 WREG32(0x6578, 0x10002);
880 WREG32(0x657C, 0x8008BEF8);
881 WREG32(0x6578, 0x10100);
882 WREG32(0x657C, 0x81F0BF28);
883 WREG32(0x6578, 0x10101);
884 WREG32(0x657C, 0x83608CA0);
885 WREG32(0x6578, 0x10102);
886 WREG32(0x657C, 0x8018BED0);
887 WREG32(0x6578, 0x10200);
888 WREG32(0x657C, 0x8148BF38);
889 WREG32(0x6578, 0x10201);
890 WREG32(0x657C, 0x84408C80);
891 WREG32(0x6578, 0x10202);
892 WREG32(0x657C, 0x8008BEB8);
893 WREG32(0x6578, 0x10300);
894 WREG32(0x657C, 0x80B0BF78);
895 WREG32(0x6578, 0x10301);
896 WREG32(0x657C, 0x85008C20);
897 WREG32(0x6578, 0x10302);
898 WREG32(0x657C, 0x8020BEA0);
899 WREG32(0x6578, 0x10400);
900 WREG32(0x657C, 0x8028BF90);
901 WREG32(0x6578, 0x10401);
902 WREG32(0x657C, 0x85E08BC0);
903 WREG32(0x6578, 0x10402);
904 WREG32(0x657C, 0x8018BE90);
905 WREG32(0x6578, 0x10500);
906 WREG32(0x657C, 0xBFB8BFB0);
907 WREG32(0x6578, 0x10501);
908 WREG32(0x657C, 0x86C08B40);
909 WREG32(0x6578, 0x10502);
910 WREG32(0x657C, 0x8010BE90);
911 WREG32(0x6578, 0x10600);
912 WREG32(0x657C, 0xBF58BFC8);
913 WREG32(0x6578, 0x10601);
914 WREG32(0x657C, 0x87A08AA0);
915 WREG32(0x6578, 0x10602);
916 WREG32(0x657C, 0x8010BE98);
917 WREG32(0x6578, 0x10700);
918 WREG32(0x657C, 0xBF10BFF0);
919 WREG32(0x6578, 0x10701);
920 WREG32(0x657C, 0x886089E0);
921 WREG32(0x6578, 0x10702);
922 WREG32(0x657C, 0x8018BEB0);
923 WREG32(0x6578, 0x10800);
924 WREG32(0x657C, 0xBED8BFE8);
925 WREG32(0x6578, 0x10801);
926 WREG32(0x657C, 0x89408940);
927 WREG32(0x6578, 0x10802);
928 WREG32(0x657C, 0xBFE8BED8);
929 WREG32(0x6578, 0x20000);
930 WREG32(0x657C, 0x80008000);
931 WREG32(0x6578, 0x20001);
932 WREG32(0x657C, 0x90008000);
933 WREG32(0x6578, 0x20002);
934 WREG32(0x657C, 0x80008000);
935 WREG32(0x6578, 0x20003);
936 WREG32(0x657C, 0x80008000);
937 WREG32(0x6578, 0x20100);
938 WREG32(0x657C, 0x80108000);
939 WREG32(0x6578, 0x20101);
940 WREG32(0x657C, 0x8FE0BF70);
941 WREG32(0x6578, 0x20102);
942 WREG32(0x657C, 0xBFE880C0);
943 WREG32(0x6578, 0x20103);
944 WREG32(0x657C, 0x80008000);
945 WREG32(0x6578, 0x20200);
946 WREG32(0x657C, 0x8018BFF8);
947 WREG32(0x6578, 0x20201);
948 WREG32(0x657C, 0x8F80BF08);
949 WREG32(0x6578, 0x20202);
950 WREG32(0x657C, 0xBFD081A0);
951 WREG32(0x6578, 0x20203);
952 WREG32(0x657C, 0xBFF88000);
953 WREG32(0x6578, 0x20300);
954 WREG32(0x657C, 0x80188000);
955 WREG32(0x6578, 0x20301);
956 WREG32(0x657C, 0x8EE0BEC0);
957 WREG32(0x6578, 0x20302);
958 WREG32(0x657C, 0xBFB082A0);
959 WREG32(0x6578, 0x20303);
960 WREG32(0x657C, 0x80008000);
961 WREG32(0x6578, 0x20400);
962 WREG32(0x657C, 0x80188000);
963 WREG32(0x6578, 0x20401);
964 WREG32(0x657C, 0x8E00BEA0);
965 WREG32(0x6578, 0x20402);
966 WREG32(0x657C, 0xBF8883C0);
967 WREG32(0x6578, 0x20403);
968 WREG32(0x657C, 0x80008000);
969 WREG32(0x6578, 0x20500);
970 WREG32(0x657C, 0x80188000);
971 WREG32(0x6578, 0x20501);
972 WREG32(0x657C, 0x8D00BE90);
973 WREG32(0x6578, 0x20502);
974 WREG32(0x657C, 0xBF588500);
975 WREG32(0x6578, 0x20503);
976 WREG32(0x657C, 0x80008008);
977 WREG32(0x6578, 0x20600);
978 WREG32(0x657C, 0x80188000);
979 WREG32(0x6578, 0x20601);
980 WREG32(0x657C, 0x8BC0BE98);
981 WREG32(0x6578, 0x20602);
982 WREG32(0x657C, 0xBF308660);
983 WREG32(0x6578, 0x20603);
984 WREG32(0x657C, 0x80008008);
985 WREG32(0x6578, 0x20700);
986 WREG32(0x657C, 0x80108000);
987 WREG32(0x6578, 0x20701);
988 WREG32(0x657C, 0x8A80BEB0);
989 WREG32(0x6578, 0x20702);
990 WREG32(0x657C, 0xBF0087C0);
991 WREG32(0x6578, 0x20703);
992 WREG32(0x657C, 0x80008008);
993 WREG32(0x6578, 0x20800);
994 WREG32(0x657C, 0x80108000);
995 WREG32(0x6578, 0x20801);
996 WREG32(0x657C, 0x8920BED0);
997 WREG32(0x6578, 0x20802);
998 WREG32(0x657C, 0xBED08920);
999 WREG32(0x6578, 0x20803);
1000 WREG32(0x657C, 0x80008010);
1001 WREG32(0x6578, 0x30000);
1002 WREG32(0x657C, 0x90008000);
1003 WREG32(0x6578, 0x30001);
1004 WREG32(0x657C, 0x80008000);
1005 WREG32(0x6578, 0x30100);
1006 WREG32(0x657C, 0x8FE0BF90);
1007 WREG32(0x6578, 0x30101);
1008 WREG32(0x657C, 0xBFF880A0);
1009 WREG32(0x6578, 0x30200);
1010 WREG32(0x657C, 0x8F60BF40);
1011 WREG32(0x6578, 0x30201);
1012 WREG32(0x657C, 0xBFE88180);
1013 WREG32(0x6578, 0x30300);
1014 WREG32(0x657C, 0x8EC0BF00);
1015 WREG32(0x6578, 0x30301);
1016 WREG32(0x657C, 0xBFC88280);
1017 WREG32(0x6578, 0x30400);
1018 WREG32(0x657C, 0x8DE0BEE0);
1019 WREG32(0x6578, 0x30401);
1020 WREG32(0x657C, 0xBFA083A0);
1021 WREG32(0x6578, 0x30500);
1022 WREG32(0x657C, 0x8CE0BED0);
1023 WREG32(0x6578, 0x30501);
1024 WREG32(0x657C, 0xBF7884E0);
1025 WREG32(0x6578, 0x30600);
1026 WREG32(0x657C, 0x8BA0BED8);
1027 WREG32(0x6578, 0x30601);
1028 WREG32(0x657C, 0xBF508640);
1029 WREG32(0x6578, 0x30700);
1030 WREG32(0x657C, 0x8A60BEE8);
1031 WREG32(0x6578, 0x30701);
1032 WREG32(0x657C, 0xBF2087A0);
1033 WREG32(0x6578, 0x30800);
1034 WREG32(0x657C, 0x8900BF00);
1035 WREG32(0x6578, 0x30801);
1036 WREG32(0x657C, 0xBF008900);
1037}
1038
1039static void 808static void
1040atombios_yuv_setup(struct drm_encoder *encoder, bool enable) 809atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
1041{ 810{
@@ -1074,129 +843,6 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
1074} 843}
1075 844
1076static void 845static void
1077atombios_overscan_setup(struct drm_encoder *encoder,
1078 struct drm_display_mode *mode,
1079 struct drm_display_mode *adjusted_mode)
1080{
1081 struct drm_device *dev = encoder->dev;
1082 struct radeon_device *rdev = dev->dev_private;
1083 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1084 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1085 SET_CRTC_OVERSCAN_PS_ALLOCATION args;
1086 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
1087
1088 memset(&args, 0, sizeof(args));
1089
1090 args.usOverscanRight = 0;
1091 args.usOverscanLeft = 0;
1092 args.usOverscanBottom = 0;
1093 args.usOverscanTop = 0;
1094 args.ucCRTC = radeon_crtc->crtc_id;
1095
1096 if (radeon_encoder->flags & RADEON_USE_RMX) {
1097 if (radeon_encoder->rmx_type == RMX_FULL) {
1098 args.usOverscanRight = 0;
1099 args.usOverscanLeft = 0;
1100 args.usOverscanBottom = 0;
1101 args.usOverscanTop = 0;
1102 } else if (radeon_encoder->rmx_type == RMX_CENTER) {
1103 args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1104 args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1105 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1106 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1107 } else if (radeon_encoder->rmx_type == RMX_ASPECT) {
1108 int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
1109 int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
1110
1111 if (a1 > a2) {
1112 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1113 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1114 } else if (a2 > a1) {
1115 args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1116 args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1117 }
1118 }
1119 }
1120
1121 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1122
1123}
1124
1125static void
1126atombios_scaler_setup(struct drm_encoder *encoder)
1127{
1128 struct drm_device *dev = encoder->dev;
1129 struct radeon_device *rdev = dev->dev_private;
1130 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1131 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1132 ENABLE_SCALER_PS_ALLOCATION args;
1133 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
1134 /* fixme - fill in enc_priv for atom dac */
1135 enum radeon_tv_std tv_std = TV_STD_NTSC;
1136
1137 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
1138 return;
1139
1140 memset(&args, 0, sizeof(args));
1141
1142 args.ucScaler = radeon_crtc->crtc_id;
1143
1144 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
1145 switch (tv_std) {
1146 case TV_STD_NTSC:
1147 default:
1148 args.ucTVStandard = ATOM_TV_NTSC;
1149 break;
1150 case TV_STD_PAL:
1151 args.ucTVStandard = ATOM_TV_PAL;
1152 break;
1153 case TV_STD_PAL_M:
1154 args.ucTVStandard = ATOM_TV_PALM;
1155 break;
1156 case TV_STD_PAL_60:
1157 args.ucTVStandard = ATOM_TV_PAL60;
1158 break;
1159 case TV_STD_NTSC_J:
1160 args.ucTVStandard = ATOM_TV_NTSCJ;
1161 break;
1162 case TV_STD_SCART_PAL:
1163 args.ucTVStandard = ATOM_TV_PAL; /* ??? */
1164 break;
1165 case TV_STD_SECAM:
1166 args.ucTVStandard = ATOM_TV_SECAM;
1167 break;
1168 case TV_STD_PAL_CN:
1169 args.ucTVStandard = ATOM_TV_PALCN;
1170 break;
1171 }
1172 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1173 } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) {
1174 args.ucTVStandard = ATOM_TV_CV;
1175 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1176 } else if (radeon_encoder->flags & RADEON_USE_RMX) {
1177 if (radeon_encoder->rmx_type == RMX_FULL)
1178 args.ucEnable = ATOM_SCALER_EXPANSION;
1179 else if (radeon_encoder->rmx_type == RMX_CENTER)
1180 args.ucEnable = ATOM_SCALER_CENTER;
1181 else if (radeon_encoder->rmx_type == RMX_ASPECT)
1182 args.ucEnable = ATOM_SCALER_EXPANSION;
1183 } else {
1184 if (ASIC_IS_AVIVO(rdev))
1185 args.ucEnable = ATOM_SCALER_DISABLE;
1186 else
1187 args.ucEnable = ATOM_SCALER_CENTER;
1188 }
1189
1190 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1191
1192 if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
1193 && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
1194 atom_rv515_force_tv_scaler(rdev);
1195 }
1196
1197}
1198
1199static void
1200radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) 846radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1201{ 847{
1202 struct drm_device *dev = encoder->dev; 848 struct drm_device *dev = encoder->dev;
@@ -1448,8 +1094,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1448 radeon_encoder->pixel_clock = adjusted_mode->clock; 1094 radeon_encoder->pixel_clock = adjusted_mode->clock;
1449 1095
1450 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); 1096 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1451 atombios_overscan_setup(encoder, mode, adjusted_mode);
1452 atombios_scaler_setup(encoder);
1453 atombios_set_encoder_crtc_source(encoder); 1097 atombios_set_encoder_crtc_source(encoder);
1454 1098
1455 if (ASIC_IS_AVIVO(rdev)) { 1099 if (ASIC_IS_AVIVO(rdev)) {
@@ -1667,6 +1311,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1667 1311
1668 radeon_encoder->encoder_id = encoder_id; 1312 radeon_encoder->encoder_id = encoder_id;
1669 radeon_encoder->devices = supported_device; 1313 radeon_encoder->devices = supported_device;
1314 radeon_encoder->rmx_type = RMX_OFF;
1670 1315
1671 switch (radeon_encoder->encoder_id) { 1316 switch (radeon_encoder->encoder_id) {
1672 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1317 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fa86d398945e..3206c0ad7b6c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -101,9 +101,10 @@ static int radeonfb_setcolreg(unsigned regno,
101 break; 101 break;
102 case 24: 102 case 24:
103 case 32: 103 case 32:
104 fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | 104 fb->pseudo_palette[regno] =
105 (green & 0xff00) | 105 (((red >> 8) & 0xff) << info->var.red.offset) |
106 ((blue & 0xff00) >> 8); 106 (((green >> 8) & 0xff) << info->var.green.offset) |
107 (((blue >> 8) & 0xff) << info->var.blue.offset);
107 break; 108 break;
108 } 109 }
109 } 110 }
@@ -154,6 +155,7 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var,
154 var->transp.length = 0; 155 var->transp.length = 0;
155 var->transp.offset = 0; 156 var->transp.offset = 0;
156 break; 157 break;
158#ifdef __LITTLE_ENDIAN
157 case 15: 159 case 15:
158 var->red.offset = 10; 160 var->red.offset = 10;
159 var->green.offset = 5; 161 var->green.offset = 5;
@@ -194,6 +196,28 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var,
194 var->transp.length = 8; 196 var->transp.length = 8;
195 var->transp.offset = 24; 197 var->transp.offset = 24;
196 break; 198 break;
199#else
200 case 24:
201 var->red.offset = 8;
202 var->green.offset = 16;
203 var->blue.offset = 24;
204 var->red.length = 8;
205 var->green.length = 8;
206 var->blue.length = 8;
207 var->transp.length = 0;
208 var->transp.offset = 0;
209 break;
210 case 32:
211 var->red.offset = 8;
212 var->green.offset = 16;
213 var->blue.offset = 24;
214 var->red.length = 8;
215 var->green.length = 8;
216 var->blue.length = 8;
217 var->transp.length = 8;
218 var->transp.offset = 0;
219 break;
220#endif
197 default: 221 default:
198 return -EINVAL; 222 return -EINVAL;
199 } 223 }
@@ -447,10 +471,10 @@ static struct notifier_block paniced = {
447 .notifier_call = radeonfb_panic, 471 .notifier_call = radeonfb_panic,
448}; 472};
449 473
450static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) 474static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
451{ 475{
452 int aligned = width; 476 int aligned = width;
453 int align_large = (ASIC_IS_AVIVO(rdev)); 477 int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
454 int pitch_mask = 0; 478 int pitch_mask = 0;
455 479
456 switch (bpp / 8) { 480 switch (bpp / 8) {
@@ -478,36 +502,42 @@ int radeonfb_create(struct radeon_device *rdev,
478{ 502{
479 struct fb_info *info; 503 struct fb_info *info;
480 struct radeon_fb_device *rfbdev; 504 struct radeon_fb_device *rfbdev;
481 struct drm_framebuffer *fb; 505 struct drm_framebuffer *fb = NULL;
482 struct radeon_framebuffer *rfb; 506 struct radeon_framebuffer *rfb;
483 struct drm_mode_fb_cmd mode_cmd; 507 struct drm_mode_fb_cmd mode_cmd;
484 struct drm_gem_object *gobj = NULL; 508 struct drm_gem_object *gobj = NULL;
485 struct radeon_object *robj = NULL; 509 struct radeon_object *robj = NULL;
486 struct device *device = &rdev->pdev->dev; 510 struct device *device = &rdev->pdev->dev;
487 int size, aligned_size, ret; 511 int size, aligned_size, ret;
512 u64 fb_gpuaddr;
488 void *fbptr = NULL; 513 void *fbptr = NULL;
514 unsigned long tmp;
515 bool fb_tiled = false; /* useful for testing */
489 516
490 mode_cmd.width = surface_width; 517 mode_cmd.width = surface_width;
491 mode_cmd.height = surface_height; 518 mode_cmd.height = surface_height;
492 mode_cmd.bpp = 32; 519 mode_cmd.bpp = 32;
493 /* need to align pitch with crtc limits */ 520 /* need to align pitch with crtc limits */
494 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); 521 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
495 mode_cmd.depth = 24; 522 mode_cmd.depth = 24;
496 523
497 size = mode_cmd.pitch * mode_cmd.height; 524 size = mode_cmd.pitch * mode_cmd.height;
498 aligned_size = ALIGN(size, PAGE_SIZE); 525 aligned_size = ALIGN(size, PAGE_SIZE);
499 526
500 ret = radeon_gem_object_create(rdev, aligned_size, 0, 527 ret = radeon_gem_object_create(rdev, aligned_size, 0,
501 RADEON_GEM_DOMAIN_VRAM, 528 RADEON_GEM_DOMAIN_VRAM,
502 false, ttm_bo_type_kernel, 529 false, ttm_bo_type_kernel,
503 false, &gobj); 530 false, &gobj);
504 if (ret) { 531 if (ret) {
505 printk(KERN_ERR "failed to allocate framebuffer\n"); 532 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
533 surface_width, surface_height);
506 ret = -ENOMEM; 534 ret = -ENOMEM;
507 goto out; 535 goto out;
508 } 536 }
509 robj = gobj->driver_private; 537 robj = gobj->driver_private;
510 538
539 if (fb_tiled)
540 radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch);
511 mutex_lock(&rdev->ddev->struct_mutex); 541 mutex_lock(&rdev->ddev->struct_mutex);
512 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); 542 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
513 if (fb == NULL) { 543 if (fb == NULL) {
@@ -515,12 +545,19 @@ int radeonfb_create(struct radeon_device *rdev,
515 ret = -ENOMEM; 545 ret = -ENOMEM;
516 goto out_unref; 546 goto out_unref;
517 } 547 }
548 ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
549 if (ret) {
550 printk(KERN_ERR "failed to pin framebuffer\n");
551 ret = -ENOMEM;
552 goto out_unref;
553 }
518 554
519 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); 555 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
520 556
521 rfb = to_radeon_framebuffer(fb); 557 rfb = to_radeon_framebuffer(fb);
522 *rfb_p = rfb; 558 *rfb_p = rfb;
523 rdev->fbdev_rfb = rfb; 559 rdev->fbdev_rfb = rfb;
560 rdev->fbdev_robj = robj;
524 561
525 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); 562 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
526 if (info == NULL) { 563 if (info == NULL) {
@@ -529,6 +566,9 @@ int radeonfb_create(struct radeon_device *rdev,
529 } 566 }
530 rfbdev = info->par; 567 rfbdev = info->par;
531 568
569 if (fb_tiled)
570 radeon_object_check_tiling(robj, 0, 0);
571
532 ret = radeon_object_kmap(robj, &fbptr); 572 ret = radeon_object_kmap(robj, &fbptr);
533 if (ret) { 573 if (ret) {
534 goto out_unref; 574 goto out_unref;
@@ -541,13 +581,13 @@ int radeonfb_create(struct radeon_device *rdev,
541 info->fix.xpanstep = 1; /* doing it in hw */ 581 info->fix.xpanstep = 1; /* doing it in hw */
542 info->fix.ypanstep = 1; /* doing it in hw */ 582 info->fix.ypanstep = 1; /* doing it in hw */
543 info->fix.ywrapstep = 0; 583 info->fix.ywrapstep = 0;
544 info->fix.accel = FB_ACCEL_I830; 584 info->fix.accel = FB_ACCEL_NONE;
545 info->fix.type_aux = 0; 585 info->fix.type_aux = 0;
546 info->flags = FBINFO_DEFAULT; 586 info->flags = FBINFO_DEFAULT;
547 info->fbops = &radeonfb_ops; 587 info->fbops = &radeonfb_ops;
548 info->fix.line_length = fb->pitch; 588 info->fix.line_length = fb->pitch;
549 info->screen_base = fbptr; 589 tmp = fb_gpuaddr - rdev->mc.vram_location;
550 info->fix.smem_start = (unsigned long)fbptr; 590 info->fix.smem_start = rdev->mc.aper_base + tmp;
551 info->fix.smem_len = size; 591 info->fix.smem_len = size;
552 info->screen_base = fbptr; 592 info->screen_base = fbptr;
553 info->screen_size = size; 593 info->screen_size = size;
@@ -562,8 +602,13 @@ int radeonfb_create(struct radeon_device *rdev,
562 info->var.width = -1; 602 info->var.width = -1;
563 info->var.xres = fb_width; 603 info->var.xres = fb_width;
564 info->var.yres = fb_height; 604 info->var.yres = fb_height;
565 info->fix.mmio_start = pci_resource_start(rdev->pdev, 2); 605
566 info->fix.mmio_len = pci_resource_len(rdev->pdev, 2); 606 /* setup aperture base/size for vesafb takeover */
607 info->aperture_base = rdev->ddev->mode_config.fb_base;
608 info->aperture_size = rdev->mc.real_vram_size;
609
610 info->fix.mmio_start = 0;
611 info->fix.mmio_len = 0;
567 info->pixmap.size = 64*1024; 612 info->pixmap.size = 64*1024;
568 info->pixmap.buf_align = 8; 613 info->pixmap.buf_align = 8;
569 info->pixmap.access_align = 32; 614 info->pixmap.access_align = 32;
@@ -590,6 +635,7 @@ int radeonfb_create(struct radeon_device *rdev,
590 info->var.transp.offset = 0; 635 info->var.transp.offset = 0;
591 info->var.transp.length = 0; 636 info->var.transp.length = 0;
592 break; 637 break;
638#ifdef __LITTLE_ENDIAN
593 case 15: 639 case 15:
594 info->var.red.offset = 10; 640 info->var.red.offset = 10;
595 info->var.green.offset = 5; 641 info->var.green.offset = 5;
@@ -629,7 +675,29 @@ int radeonfb_create(struct radeon_device *rdev,
629 info->var.transp.offset = 24; 675 info->var.transp.offset = 24;
630 info->var.transp.length = 8; 676 info->var.transp.length = 8;
631 break; 677 break;
678#else
679 case 24:
680 info->var.red.offset = 8;
681 info->var.green.offset = 16;
682 info->var.blue.offset = 24;
683 info->var.red.length = 8;
684 info->var.green.length = 8;
685 info->var.blue.length = 8;
686 info->var.transp.offset = 0;
687 info->var.transp.length = 0;
688 break;
689 case 32:
690 info->var.red.offset = 8;
691 info->var.green.offset = 16;
692 info->var.blue.offset = 24;
693 info->var.red.length = 8;
694 info->var.green.length = 8;
695 info->var.blue.length = 8;
696 info->var.transp.offset = 0;
697 info->var.transp.length = 8;
698 break;
632 default: 699 default:
700#endif
633 break; 701 break;
634 } 702 }
635 703
@@ -644,7 +712,7 @@ out_unref:
644 if (robj) { 712 if (robj) {
645 radeon_object_kunmap(robj); 713 radeon_object_kunmap(robj);
646 } 714 }
647 if (ret) { 715 if (fb && ret) {
648 list_del(&fb->filp_head); 716 list_del(&fb->filp_head);
649 drm_gem_object_unreference(gobj); 717 drm_gem_object_unreference(gobj);
650 drm_framebuffer_cleanup(fb); 718 drm_framebuffer_cleanup(fb);
@@ -813,6 +881,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
813 robj = rfb->obj->driver_private; 881 robj = rfb->obj->driver_private;
814 unregister_framebuffer(info); 882 unregister_framebuffer(info);
815 radeon_object_kunmap(robj); 883 radeon_object_kunmap(robj);
884 radeon_object_unpin(robj);
816 framebuffer_release(info); 885 framebuffer_release(info);
817 } 886 }
818 887
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 96afbf5ae2ad..b4e48dd2e859 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -195,7 +195,7 @@ retry:
195 r = wait_event_interruptible_timeout(rdev->fence_drv.queue, 195 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
196 radeon_fence_signaled(fence), timeout); 196 radeon_fence_signaled(fence), timeout);
197 if (unlikely(r == -ERESTARTSYS)) { 197 if (unlikely(r == -ERESTARTSYS)) {
198 return -ERESTART; 198 return -EBUSY;
199 } 199 }
200 } else { 200 } else {
201 r = wait_event_timeout(rdev->fence_drv.queue, 201 r = wait_event_timeout(rdev->fence_drv.queue,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d343a15316ec..2977539880fb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -177,7 +177,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
177 return -ENOMEM; 177 return -ENOMEM;
178 } 178 }
179 rdev->gart.pages[p] = pagelist[i]; 179 rdev->gart.pages[p] = pagelist[i];
180 page_base = (uint32_t)rdev->gart.pages_addr[p]; 180 page_base = rdev->gart.pages_addr[p];
181 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { 181 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
182 radeon_gart_set_page(rdev, t, page_base); 182 radeon_gart_set_page(rdev, t, page_base);
183 page_base += 4096; 183 page_base += 4096;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index eb516034235d..cded5180c752 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -157,9 +157,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
157 struct radeon_device *rdev = dev->dev_private; 157 struct radeon_device *rdev = dev->dev_private;
158 struct drm_radeon_gem_info *args = data; 158 struct drm_radeon_gem_info *args = data;
159 159
160 args->vram_size = rdev->mc.vram_size; 160 args->vram_size = rdev->mc.real_vram_size;
161 /* FIXME: report somethings that makes sense */ 161 /* FIXME: report somethings that makes sense */
162 args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024); 162 args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
163 args->gart_size = rdev->mc.gtt_size; 163 args->gart_size = rdev->mc.gtt_size;
164 return 0; 164 return 0;
165} 165}
@@ -285,3 +285,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
285 mutex_unlock(&dev->struct_mutex); 285 mutex_unlock(&dev->struct_mutex);
286 return r; 286 return r;
287} 287}
288
289int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
290 struct drm_file *filp)
291{
292 struct drm_radeon_gem_set_tiling *args = data;
293 struct drm_gem_object *gobj;
294 struct radeon_object *robj;
295 int r = 0;
296
297 DRM_DEBUG("%d \n", args->handle);
298 gobj = drm_gem_object_lookup(dev, filp, args->handle);
299 if (gobj == NULL)
300 return -EINVAL;
301 robj = gobj->driver_private;
302 radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
303 mutex_lock(&dev->struct_mutex);
304 drm_gem_object_unreference(gobj);
305 mutex_unlock(&dev->struct_mutex);
306 return r;
307}
308
309int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
310 struct drm_file *filp)
311{
312 struct drm_radeon_gem_get_tiling *args = data;
313 struct drm_gem_object *gobj;
314 struct radeon_object *robj;
315 int r = 0;
316
317 DRM_DEBUG("\n");
318 gobj = drm_gem_object_lookup(dev, filp, args->handle);
319 if (gobj == NULL)
320 return -EINVAL;
321 robj = gobj->driver_private;
322 radeon_object_get_tiling_flags(robj, &args->tiling_flags,
323 &args->pitch);
324 mutex_lock(&dev->struct_mutex);
325 drm_gem_object_unreference(gobj);
326 mutex_unlock(&dev->struct_mutex);
327 return r;
328}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 4612a7c146d1..3357110e30ce 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -58,6 +58,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
58 if (r) { 58 if (r) {
59 DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); 59 DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n");
60 radeon_device_fini(rdev); 60 radeon_device_fini(rdev);
61 kfree(rdev);
62 dev->dev_private = NULL;
61 return r; 63 return r;
62 } 64 }
63 return 0; 65 return 0;
@@ -291,5 +293,7 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
291 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), 293 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
292 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), 294 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
293 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), 295 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
296 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH),
297 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH),
294}; 298};
295int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 299int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8086ecf7f03d..7d06dc98a42a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -29,6 +29,171 @@
29#include "radeon_fixed.h" 29#include "radeon_fixed.h"
30#include "radeon.h" 30#include "radeon.h"
31 31
32static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
33 struct drm_display_mode *mode,
34 struct drm_display_mode *adjusted_mode)
35{
36 struct drm_device *dev = crtc->dev;
37 struct radeon_device *rdev = dev->dev_private;
38 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
39 int xres = mode->hdisplay;
40 int yres = mode->vdisplay;
41 bool hscale = true, vscale = true;
42 int hsync_wid;
43 int vsync_wid;
44 int hsync_start;
45 int blank_width;
46 u32 scale, inc, crtc_more_cntl;
47 u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
48 u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
49 u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
50 struct radeon_native_mode *native_mode = &radeon_crtc->native_mode;
51
52 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
53 (RADEON_VERT_STRETCH_RESERVED |
54 RADEON_VERT_AUTO_RATIO_INC);
55 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
56 (RADEON_HORZ_FP_LOOP_STRETCH |
57 RADEON_HORZ_AUTO_RATIO_INC);
58
59 crtc_more_cntl = 0;
60 if ((rdev->family == CHIP_RS100) ||
61 (rdev->family == CHIP_RS200)) {
 62 /* This is to work around the ASIC bug for RMX; some versions
 63 of the BIOS don't have this register initialized correctly. */
64 crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
65 }
66
67
68 fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
69 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
70
71 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
72 if (!hsync_wid)
73 hsync_wid = 1;
74 hsync_start = mode->crtc_hsync_start - 8;
75
76 fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
77 | ((hsync_wid & 0x3f) << 16)
78 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
79 ? RADEON_CRTC_H_SYNC_POL
80 : 0));
81
82 fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
83 | ((mode->crtc_vdisplay - 1) << 16));
84
85 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
86 if (!vsync_wid)
87 vsync_wid = 1;
88
89 fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
90 | ((vsync_wid & 0x1f) << 16)
91 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
92 ? RADEON_CRTC_V_SYNC_POL
93 : 0));
94
95 fp_horz_vert_active = 0;
96
97 if (native_mode->panel_xres == 0 ||
98 native_mode->panel_yres == 0) {
99 hscale = false;
100 vscale = false;
101 } else {
102 if (xres > native_mode->panel_xres)
103 xres = native_mode->panel_xres;
104 if (yres > native_mode->panel_yres)
105 yres = native_mode->panel_yres;
106
107 if (xres == native_mode->panel_xres)
108 hscale = false;
109 if (yres == native_mode->panel_yres)
110 vscale = false;
111 }
112
113 switch (radeon_crtc->rmx_type) {
114 case RMX_FULL:
115 case RMX_ASPECT:
116 if (!hscale)
117 fp_horz_stretch |= ((xres/8-1) << 16);
118 else {
119 inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
120 scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
121 / native_mode->panel_xres + 1;
122 fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
123 RADEON_HORZ_STRETCH_BLEND |
124 RADEON_HORZ_STRETCH_ENABLE |
125 ((native_mode->panel_xres/8-1) << 16));
126 }
127
128 if (!vscale)
129 fp_vert_stretch |= ((yres-1) << 12);
130 else {
131 inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
132 scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
133 / native_mode->panel_yres + 1;
134 fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
135 RADEON_VERT_STRETCH_ENABLE |
136 RADEON_VERT_STRETCH_BLEND |
137 ((native_mode->panel_yres-1) << 12));
138 }
139 break;
140 case RMX_CENTER:
141 fp_horz_stretch |= ((xres/8-1) << 16);
142 fp_vert_stretch |= ((yres-1) << 12);
143
144 crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
145 RADEON_CRTC_AUTO_VERT_CENTER_EN);
146
147 blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
148 if (blank_width > 110)
149 blank_width = 110;
150
151 fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
152 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
153
154 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
155 if (!hsync_wid)
156 hsync_wid = 1;
157
158 fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
159 | ((hsync_wid & 0x3f) << 16)
160 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
161 ? RADEON_CRTC_H_SYNC_POL
162 : 0));
163
164 fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
165 | ((mode->crtc_vdisplay - 1) << 16));
166
167 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
168 if (!vsync_wid)
169 vsync_wid = 1;
170
171 fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
172 | ((vsync_wid & 0x1f) << 16)
173 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
174 ? RADEON_CRTC_V_SYNC_POL
175 : 0)));
176
177 fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
178 (((native_mode->panel_xres / 8) & 0x1ff) << 16));
179 break;
180 case RMX_OFF:
181 default:
182 fp_horz_stretch |= ((xres/8-1) << 16);
183 fp_vert_stretch |= ((yres-1) << 12);
184 break;
185 }
186
187 WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
188 WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
189 WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
190 WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
191 WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
192 WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
193 WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
194 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
195}
196
32void radeon_restore_common_regs(struct drm_device *dev) 197void radeon_restore_common_regs(struct drm_device *dev)
33{ 198{
34 /* don't need this yet */ 199 /* don't need this yet */
@@ -235,6 +400,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
235 uint64_t base; 400 uint64_t base;
236 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; 401 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
237 uint32_t crtc_pitch, pitch_pixels; 402 uint32_t crtc_pitch, pitch_pixels;
403 uint32_t tiling_flags;
238 404
239 DRM_DEBUG("\n"); 405 DRM_DEBUG("\n");
240 406
@@ -244,7 +410,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
244 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { 410 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
245 return -EINVAL; 411 return -EINVAL;
246 } 412 }
247 crtc_offset = (u32)base; 413 /* if scanout was in GTT this really wouldn't work */
414 /* crtc offset is from display base addr not FB location */
415 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
416
417 base -= radeon_crtc->legacy_display_base_addr;
418
248 crtc_offset_cntl = 0; 419 crtc_offset_cntl = 0;
249 420
250 pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); 421 pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
@@ -253,8 +424,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
253 (crtc->fb->bits_per_pixel * 8)); 424 (crtc->fb->bits_per_pixel * 8));
254 crtc_pitch |= crtc_pitch << 16; 425 crtc_pitch |= crtc_pitch << 16;
255 426
256 /* TODO tiling */ 427 radeon_object_get_tiling_flags(obj->driver_private,
257 if (0) { 428 &tiling_flags, NULL);
429 if (tiling_flags & RADEON_TILING_MICRO)
430 DRM_ERROR("trying to scanout microtiled buffer\n");
431
432 if (tiling_flags & RADEON_TILING_MACRO) {
258 if (ASIC_IS_R300(rdev)) 433 if (ASIC_IS_R300(rdev))
259 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | 434 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
260 R300_CRTC_MICRO_TILE_BUFFER_DIS | 435 R300_CRTC_MICRO_TILE_BUFFER_DIS |
@@ -270,15 +445,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
270 crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; 445 crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
271 } 446 }
272 447
273 448 if (tiling_flags & RADEON_TILING_MACRO) {
274 /* TODO more tiling */
275 if (0) {
276 if (ASIC_IS_R300(rdev)) { 449 if (ASIC_IS_R300(rdev)) {
277 crtc_tile_x0_y0 = x | (y << 16); 450 crtc_tile_x0_y0 = x | (y << 16);
278 base &= ~0x7ff; 451 base &= ~0x7ff;
279 } else { 452 } else {
280 int byteshift = crtc->fb->bits_per_pixel >> 4; 453 int byteshift = crtc->fb->bits_per_pixel >> 4;
281 int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; 454 int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
282 base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); 455 base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
283 crtc_offset_cntl |= (y % 16); 456 crtc_offset_cntl |= (y % 16);
284 } 457 }
@@ -303,11 +476,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
303 476
304 base &= ~7; 477 base &= ~7;
305 478
306 /* update sarea TODO */
307
308 crtc_offset = (u32)base; 479 crtc_offset = (u32)base;
309 480
310 WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location); 481 WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);
311 482
312 if (ASIC_IS_R300(rdev)) { 483 if (ASIC_IS_R300(rdev)) {
313 if (radeon_crtc->crtc_id) 484 if (radeon_crtc->crtc_id)
@@ -751,6 +922,8 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
751 struct drm_display_mode *mode, 922 struct drm_display_mode *mode,
752 struct drm_display_mode *adjusted_mode) 923 struct drm_display_mode *adjusted_mode)
753{ 924{
925 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
926 return false;
754 return true; 927 return true;
755} 928}
756 929
@@ -759,16 +932,25 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
759 struct drm_display_mode *adjusted_mode, 932 struct drm_display_mode *adjusted_mode,
760 int x, int y, struct drm_framebuffer *old_fb) 933 int x, int y, struct drm_framebuffer *old_fb)
761{ 934{
762 935 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
763 DRM_DEBUG("\n"); 936 struct drm_device *dev = crtc->dev;
937 struct radeon_device *rdev = dev->dev_private;
764 938
765 /* TODO TV */ 939 /* TODO TV */
766
767 radeon_crtc_set_base(crtc, x, y, old_fb); 940 radeon_crtc_set_base(crtc, x, y, old_fb);
768 radeon_set_crtc_timing(crtc, adjusted_mode); 941 radeon_set_crtc_timing(crtc, adjusted_mode);
769 radeon_set_pll(crtc, adjusted_mode); 942 radeon_set_pll(crtc, adjusted_mode);
770 radeon_init_disp_bandwidth(crtc->dev); 943 radeon_bandwidth_update(rdev);
771 944 if (radeon_crtc->crtc_id == 0) {
945 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
946 } else {
947 if (radeon_crtc->rmx_type != RMX_OFF) {
 948 /* FIXME: only the first crtc has rmx; what should we
 949 * do?
950 */
951 DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
952 }
953 }
772 return 0; 954 return 0;
773} 955}
774 956
@@ -799,478 +981,3 @@ void radeon_legacy_init_crtc(struct drm_device *dev,
799 radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; 981 radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
800 drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); 982 drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
801} 983}
802
803void radeon_init_disp_bw_legacy(struct drm_device *dev,
804 struct drm_display_mode *mode1,
805 uint32_t pixel_bytes1,
806 struct drm_display_mode *mode2,
807 uint32_t pixel_bytes2)
808{
809 struct radeon_device *rdev = dev->dev_private;
810 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
811 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
812 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
813 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
814 fixed20_12 memtcas_ff[8] = {
815 fixed_init(1),
816 fixed_init(2),
817 fixed_init(3),
818 fixed_init(0),
819 fixed_init_half(1),
820 fixed_init_half(2),
821 fixed_init(0),
822 };
823 fixed20_12 memtcas_rs480_ff[8] = {
824 fixed_init(0),
825 fixed_init(1),
826 fixed_init(2),
827 fixed_init(3),
828 fixed_init(0),
829 fixed_init_half(1),
830 fixed_init_half(2),
831 fixed_init_half(3),
832 };
833 fixed20_12 memtcas2_ff[8] = {
834 fixed_init(0),
835 fixed_init(1),
836 fixed_init(2),
837 fixed_init(3),
838 fixed_init(4),
839 fixed_init(5),
840 fixed_init(6),
841 fixed_init(7),
842 };
843 fixed20_12 memtrbs[8] = {
844 fixed_init(1),
845 fixed_init_half(1),
846 fixed_init(2),
847 fixed_init_half(2),
848 fixed_init(3),
849 fixed_init_half(3),
850 fixed_init(4),
851 fixed_init_half(4)
852 };
853 fixed20_12 memtrbs_r4xx[8] = {
854 fixed_init(4),
855 fixed_init(5),
856 fixed_init(6),
857 fixed_init(7),
858 fixed_init(8),
859 fixed_init(9),
860 fixed_init(10),
861 fixed_init(11)
862 };
863 fixed20_12 min_mem_eff;
864 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
865 fixed20_12 cur_latency_mclk, cur_latency_sclk;
866 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
867 disp_drain_rate2, read_return_rate;
868 fixed20_12 time_disp1_drop_priority;
869 int c;
870 int cur_size = 16; /* in octawords */
871 int critical_point = 0, critical_point2;
872/* uint32_t read_return_rate, time_disp1_drop_priority; */
873 int stop_req, max_stop_req;
874
875 min_mem_eff.full = rfixed_const_8(0);
876 /* get modes */
877 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
878 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
879 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
880 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
881 /* check crtc enables */
882 if (mode2)
883 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
884 if (mode1)
885 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
886 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
887 }
888
889 /*
 890 * determine if there is enough bw for the current mode
891 */
892 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
893 temp_ff.full = rfixed_const(100);
894 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
895 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
896 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
897
898 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
899 temp_ff.full = rfixed_const(temp);
900 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
901
902 pix_clk.full = 0;
903 pix_clk2.full = 0;
904 peak_disp_bw.full = 0;
905 if (mode1) {
906 temp_ff.full = rfixed_const(1000);
907 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
908 pix_clk.full = rfixed_div(pix_clk, temp_ff);
909 temp_ff.full = rfixed_const(pixel_bytes1);
910 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
911 }
912 if (mode2) {
913 temp_ff.full = rfixed_const(1000);
914 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
915 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
916 temp_ff.full = rfixed_const(pixel_bytes2);
917 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
918 }
919
920 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
921 if (peak_disp_bw.full >= mem_bw.full) {
922 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
923 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
924 }
925
926 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
927 temp = RREG32(RADEON_MEM_TIMING_CNTL);
928 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
929 mem_trcd = ((temp >> 2) & 0x3) + 1;
930 mem_trp = ((temp & 0x3)) + 1;
931 mem_tras = ((temp & 0x70) >> 4) + 1;
932 } else if (rdev->family == CHIP_R300 ||
933 rdev->family == CHIP_R350) { /* r300, r350 */
934 mem_trcd = (temp & 0x7) + 1;
935 mem_trp = ((temp >> 8) & 0x7) + 1;
936 mem_tras = ((temp >> 11) & 0xf) + 4;
937 } else if (rdev->family == CHIP_RV350 ||
938 rdev->family <= CHIP_RV380) {
939 /* rv3x0 */
940 mem_trcd = (temp & 0x7) + 3;
941 mem_trp = ((temp >> 8) & 0x7) + 3;
942 mem_tras = ((temp >> 11) & 0xf) + 6;
943 } else if (rdev->family == CHIP_R420 ||
944 rdev->family == CHIP_R423 ||
945 rdev->family == CHIP_RV410) {
946 /* r4xx */
947 mem_trcd = (temp & 0xf) + 3;
948 if (mem_trcd > 15)
949 mem_trcd = 15;
950 mem_trp = ((temp >> 8) & 0xf) + 3;
951 if (mem_trp > 15)
952 mem_trp = 15;
953 mem_tras = ((temp >> 12) & 0x1f) + 6;
954 if (mem_tras > 31)
955 mem_tras = 31;
956 } else { /* RV200, R200 */
957 mem_trcd = (temp & 0x7) + 1;
958 mem_trp = ((temp >> 8) & 0x7) + 1;
959 mem_tras = ((temp >> 12) & 0xf) + 4;
960 }
961 /* convert to FF */
962 trcd_ff.full = rfixed_const(mem_trcd);
963 trp_ff.full = rfixed_const(mem_trp);
964 tras_ff.full = rfixed_const(mem_tras);
965
 966 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
967 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
968 data = (temp & (7 << 20)) >> 20;
969 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
970 if (rdev->family == CHIP_RS480) /* don't think rs400 */
971 tcas_ff = memtcas_rs480_ff[data];
972 else
973 tcas_ff = memtcas_ff[data];
974 } else
975 tcas_ff = memtcas2_ff[data];
976
977 if (rdev->family == CHIP_RS400 ||
978 rdev->family == CHIP_RS480) {
979 /* extra cas latency stored in bits 23-25 0-4 clocks */
980 data = (temp >> 23) & 0x7;
981 if (data < 5)
982 tcas_ff.full += rfixed_const(data);
983 }
984
985 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
986 /* on the R300, Tcas is included in Trbs.
987 */
988 temp = RREG32(RADEON_MEM_CNTL);
989 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
990 if (data == 1) {
991 if (R300_MEM_USE_CD_CH_ONLY & temp) {
992 temp = RREG32(R300_MC_IND_INDEX);
993 temp &= ~R300_MC_IND_ADDR_MASK;
994 temp |= R300_MC_READ_CNTL_CD_mcind;
995 WREG32(R300_MC_IND_INDEX, temp);
996 temp = RREG32(R300_MC_IND_DATA);
997 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
998 } else {
999 temp = RREG32(R300_MC_READ_CNTL_AB);
1000 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1001 }
1002 } else {
1003 temp = RREG32(R300_MC_READ_CNTL_AB);
1004 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1005 }
1006 if (rdev->family == CHIP_RV410 ||
1007 rdev->family == CHIP_R420 ||
1008 rdev->family == CHIP_R423)
1009 trbs_ff = memtrbs_r4xx[data];
1010 else
1011 trbs_ff = memtrbs[data];
1012 tcas_ff.full += trbs_ff.full;
1013 }
1014
1015 sclk_eff_ff.full = sclk_ff.full;
1016
1017 if (rdev->flags & RADEON_IS_AGP) {
1018 fixed20_12 agpmode_ff;
1019 agpmode_ff.full = rfixed_const(radeon_agpmode);
1020 temp_ff.full = rfixed_const_666(16);
1021 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
1022 }
1023 /* TODO PCIE lanes may affect this - agpmode == 16?? */
1024
1025 if (ASIC_IS_R300(rdev)) {
1026 sclk_delay_ff.full = rfixed_const(250);
1027 } else {
1028 if ((rdev->family == CHIP_RV100) ||
1029 rdev->flags & RADEON_IS_IGP) {
1030 if (rdev->mc.vram_is_ddr)
1031 sclk_delay_ff.full = rfixed_const(41);
1032 else
1033 sclk_delay_ff.full = rfixed_const(33);
1034 } else {
1035 if (rdev->mc.vram_width == 128)
1036 sclk_delay_ff.full = rfixed_const(57);
1037 else
1038 sclk_delay_ff.full = rfixed_const(41);
1039 }
1040 }
1041
1042 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
1043
1044 if (rdev->mc.vram_is_ddr) {
1045 if (rdev->mc.vram_width == 32) {
1046 k1.full = rfixed_const(40);
1047 c = 3;
1048 } else {
1049 k1.full = rfixed_const(20);
1050 c = 1;
1051 }
1052 } else {
1053 k1.full = rfixed_const(40);
1054 c = 3;
1055 }
1056
1057 temp_ff.full = rfixed_const(2);
1058 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
1059 temp_ff.full = rfixed_const(c);
1060 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
1061 temp_ff.full = rfixed_const(4);
1062 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
1063 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
1064 mc_latency_mclk.full += k1.full;
1065
1066 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
1067 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
1068
1069 /*
1070 HW cursor time assuming worst case of full size colour cursor.
1071 */
1072 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
1073 temp_ff.full += trcd_ff.full;
1074 if (temp_ff.full < tras_ff.full)
1075 temp_ff.full = tras_ff.full;
1076 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
1077
1078 temp_ff.full = rfixed_const(cur_size);
1079 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
1080 /*
1081 Find the total latency for the display data.
1082 */
1083 disp_latency_overhead.full = rfixed_const(80);
1084 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
1085 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
1086 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
1087
1088 if (mc_latency_mclk.full > mc_latency_sclk.full)
1089 disp_latency.full = mc_latency_mclk.full;
1090 else
1091 disp_latency.full = mc_latency_sclk.full;
1092
1093 /* setup Max GRPH_STOP_REQ default value */
1094 if (ASIC_IS_RV100(rdev))
1095 max_stop_req = 0x5c;
1096 else
1097 max_stop_req = 0x7c;
1098
1099 if (mode1) {
1100 /* CRTC1
1101 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
1102 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
1103 */
1104 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
1105
1106 if (stop_req > max_stop_req)
1107 stop_req = max_stop_req;
1108
1109 /*
1110 Find the drain rate of the display buffer.
1111 */
1112 temp_ff.full = rfixed_const((16/pixel_bytes1));
1113 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
1114
1115 /*
1116 Find the critical point of the display buffer.
1117 */
1118 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
1119 crit_point_ff.full += rfixed_const_half(0);
1120
1121 critical_point = rfixed_trunc(crit_point_ff);
1122
1123 if (rdev->disp_priority == 2) {
1124 critical_point = 0;
1125 }
1126
1127 /*
1128 The critical point should never be above max_stop_req-4. Setting
1129 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
1130 */
1131 if (max_stop_req - critical_point < 4)
1132 critical_point = 0;
1133
1134 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
1135 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
1136 critical_point = 0x10;
1137 }
1138
1139 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
1140 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
1141 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1142 temp &= ~(RADEON_GRPH_START_REQ_MASK);
1143 if ((rdev->family == CHIP_R350) &&
1144 (stop_req > 0x15)) {
1145 stop_req -= 0x10;
1146 }
1147 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1148 temp |= RADEON_GRPH_BUFFER_SIZE;
1149 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
1150 RADEON_GRPH_CRITICAL_AT_SOF |
1151 RADEON_GRPH_STOP_CNTL);
1152 /*
1153 Write the result into the register.
1154 */
1155 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1156 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1157
1158#if 0
1159 if ((rdev->family == CHIP_RS400) ||
1160 (rdev->family == CHIP_RS480)) {
1161 /* attempt to program RS400 disp regs correctly ??? */
1162 temp = RREG32(RS400_DISP1_REG_CNTL);
1163 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
1164 RS400_DISP1_STOP_REQ_LEVEL_MASK);
1165 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
1166 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1167 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1168 temp = RREG32(RS400_DMIF_MEM_CNTL1);
1169 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
1170 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
1171 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
1172 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
1173 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
1174 }
1175#endif
1176
1177 DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
1178 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
1179 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
1180 }
1181
1182 if (mode2) {
1183 u32 grph2_cntl;
1184 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
1185
1186 if (stop_req > max_stop_req)
1187 stop_req = max_stop_req;
1188
1189 /*
1190 Find the drain rate of the display buffer.
1191 */
1192 temp_ff.full = rfixed_const((16/pixel_bytes2));
1193 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
1194
1195 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
1196 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
1197 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1198 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
1199 if ((rdev->family == CHIP_R350) &&
1200 (stop_req > 0x15)) {
1201 stop_req -= 0x10;
1202 }
1203 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1204 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
1205 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
1206 RADEON_GRPH_CRITICAL_AT_SOF |
1207 RADEON_GRPH_STOP_CNTL);
1208
1209 if ((rdev->family == CHIP_RS100) ||
1210 (rdev->family == CHIP_RS200))
1211 critical_point2 = 0;
1212 else {
1213 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
1214 temp_ff.full = rfixed_const(temp);
1215 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
1216 if (sclk_ff.full < temp_ff.full)
1217 temp_ff.full = sclk_ff.full;
1218
1219 read_return_rate.full = temp_ff.full;
1220
1221 if (mode1) {
1222 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
1223 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
1224 } else {
1225 time_disp1_drop_priority.full = 0;
1226 }
1227 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
1228 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
1229 crit_point_ff.full += rfixed_const_half(0);
1230
1231 critical_point2 = rfixed_trunc(crit_point_ff);
1232
1233 if (rdev->disp_priority == 2) {
1234 critical_point2 = 0;
1235 }
1236
1237 if (max_stop_req - critical_point2 < 4)
1238 critical_point2 = 0;
1239
1240 }
1241
1242 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
1243 /* some R300 cards have problem with this set to 0 */
1244 critical_point2 = 0x10;
1245 }
1246
1247 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1248 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1249
1250 if ((rdev->family == CHIP_RS400) ||
1251 (rdev->family == CHIP_RS480)) {
1252#if 0
1253 /* attempt to program RS400 disp2 regs correctly ??? */
1254 temp = RREG32(RS400_DISP2_REQ_CNTL1);
1255 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
1256 RS400_DISP2_STOP_REQ_LEVEL_MASK);
1257 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
1258 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1259 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1260 temp = RREG32(RS400_DISP2_REQ_CNTL2);
1261 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
1262 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
1263 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
1264 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
1265 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
1266#endif
1267 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
1268 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
1269 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
1270 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
1271 }
1272
1273 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
1274 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
1275 }
1276}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2c2f42de1d4c..34d0f58eb944 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -30,170 +30,6 @@
30#include "atom.h" 30#include "atom.h"
31 31
32 32
33static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder,
34 struct drm_display_mode *mode,
35 struct drm_display_mode *adjusted_mode)
36{
37 struct drm_device *dev = encoder->dev;
38 struct radeon_device *rdev = dev->dev_private;
39 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
40 int xres = mode->hdisplay;
41 int yres = mode->vdisplay;
42 bool hscale = true, vscale = true;
43 int hsync_wid;
44 int vsync_wid;
45 int hsync_start;
46 uint32_t scale, inc;
47 uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active;
48 uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp;
49 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
50
51 DRM_DEBUG("\n");
52
53 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
54 (RADEON_VERT_STRETCH_RESERVED |
55 RADEON_VERT_AUTO_RATIO_INC);
56 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
57 (RADEON_HORZ_FP_LOOP_STRETCH |
58 RADEON_HORZ_AUTO_RATIO_INC);
59
60 crtc_more_cntl = 0;
61 if ((rdev->family == CHIP_RS100) ||
62 (rdev->family == CHIP_RS200)) {
 63 /* This is to work around the ASIC bug for RMX; some versions
 64 of the BIOS don't have this register initialized correctly. */
65 crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
66 }
67
68
69 fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
70 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
71
72 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
73 if (!hsync_wid)
74 hsync_wid = 1;
75 hsync_start = mode->crtc_hsync_start - 8;
76
77 fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
78 | ((hsync_wid & 0x3f) << 16)
79 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
80 ? RADEON_CRTC_H_SYNC_POL
81 : 0));
82
83 fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
84 | ((mode->crtc_vdisplay - 1) << 16));
85
86 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
87 if (!vsync_wid)
88 vsync_wid = 1;
89
90 fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
91 | ((vsync_wid & 0x1f) << 16)
92 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
93 ? RADEON_CRTC_V_SYNC_POL
94 : 0));
95
96 fp_horz_vert_active = 0;
97
98 if (native_mode->panel_xres == 0 ||
99 native_mode->panel_yres == 0) {
100 hscale = false;
101 vscale = false;
102 } else {
103 if (xres > native_mode->panel_xres)
104 xres = native_mode->panel_xres;
105 if (yres > native_mode->panel_yres)
106 yres = native_mode->panel_yres;
107
108 if (xres == native_mode->panel_xres)
109 hscale = false;
110 if (yres == native_mode->panel_yres)
111 vscale = false;
112 }
113
114 if (radeon_encoder->flags & RADEON_USE_RMX) {
115 if (radeon_encoder->rmx_type != RMX_CENTER) {
116 if (!hscale)
117 fp_horz_stretch |= ((xres/8-1) << 16);
118 else {
119 inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
120 scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
121 / native_mode->panel_xres + 1;
122 fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
123 RADEON_HORZ_STRETCH_BLEND |
124 RADEON_HORZ_STRETCH_ENABLE |
125 ((native_mode->panel_xres/8-1) << 16));
126 }
127
128 if (!vscale)
129 fp_vert_stretch |= ((yres-1) << 12);
130 else {
131 inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
132 scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
133 / native_mode->panel_yres + 1;
134 fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
135 RADEON_VERT_STRETCH_ENABLE |
136 RADEON_VERT_STRETCH_BLEND |
137 ((native_mode->panel_yres-1) << 12));
138 }
139 } else if (radeon_encoder->rmx_type == RMX_CENTER) {
140 int blank_width;
141
142 fp_horz_stretch |= ((xres/8-1) << 16);
143 fp_vert_stretch |= ((yres-1) << 12);
144
145 crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
146 RADEON_CRTC_AUTO_VERT_CENTER_EN);
147
148 blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
149 if (blank_width > 110)
150 blank_width = 110;
151
152 fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
153 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
154
155 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
156 if (!hsync_wid)
157 hsync_wid = 1;
158
159 fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
160 | ((hsync_wid & 0x3f) << 16)
161 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
162 ? RADEON_CRTC_H_SYNC_POL
163 : 0));
164
165 fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
166 | ((mode->crtc_vdisplay - 1) << 16));
167
168 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
169 if (!vsync_wid)
170 vsync_wid = 1;
171
172 fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
173 | ((vsync_wid & 0x1f) << 16)
174 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
175 ? RADEON_CRTC_V_SYNC_POL
176 : 0)));
177
178 fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
179 (((native_mode->panel_xres / 8) & 0x1ff) << 16));
180 }
181 } else {
182 fp_horz_stretch |= ((xres/8-1) << 16);
183 fp_vert_stretch |= ((yres-1) << 12);
184 }
185
186 WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
187 WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
188 WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
189 WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
190 WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
191 WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
192 WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
193 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
194
195}
196
197static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) 33static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
198{ 34{
199 struct drm_device *dev = encoder->dev; 35 struct drm_device *dev = encoder->dev;
@@ -287,9 +123,6 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
287 123
288 DRM_DEBUG("\n"); 124 DRM_DEBUG("\n");
289 125
290 if (radeon_crtc->crtc_id == 0)
291 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
292
293 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); 126 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
294 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; 127 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
295 128
@@ -318,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
318 151
319 if (radeon_crtc->crtc_id == 0) { 152 if (radeon_crtc->crtc_id == 0) {
320 if (ASIC_IS_R300(rdev)) { 153 if (ASIC_IS_R300(rdev)) {
321 if (radeon_encoder->flags & RADEON_USE_RMX) 154 if (radeon_encoder->rmx_type != RMX_OFF)
322 lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; 155 lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
323 } else 156 } else
324 lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; 157 lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
@@ -350,8 +183,6 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
350 183
351 drm_mode_set_crtcinfo(adjusted_mode, 0); 184 drm_mode_set_crtcinfo(adjusted_mode, 0);
352 185
353 radeon_encoder->flags &= ~RADEON_USE_RMX;
354
355 if (radeon_encoder->rmx_type != RMX_OFF) 186 if (radeon_encoder->rmx_type != RMX_OFF)
356 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); 187 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
357 188
@@ -455,9 +286,6 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
455 286
456 DRM_DEBUG("\n"); 287 DRM_DEBUG("\n");
457 288
458 if (radeon_crtc->crtc_id == 0)
459 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
460
461 if (radeon_crtc->crtc_id == 0) { 289 if (radeon_crtc->crtc_id == 0) {
462 if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { 290 if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
463 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & 291 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
@@ -653,9 +481,6 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
653 481
654 DRM_DEBUG("\n"); 482 DRM_DEBUG("\n");
655 483
656 if (radeon_crtc->crtc_id == 0)
657 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
658
659 tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); 484 tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
660 tmp &= 0xfffff; 485 tmp &= 0xfffff;
661 if (rdev->family == CHIP_RV280) { 486 if (rdev->family == CHIP_RV280) {
@@ -711,7 +536,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
711 if (radeon_crtc->crtc_id == 0) { 536 if (radeon_crtc->crtc_id == 0) {
712 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { 537 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
713 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; 538 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
714 if (radeon_encoder->flags & RADEON_USE_RMX) 539 if (radeon_encoder->rmx_type != RMX_OFF)
715 fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; 540 fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
716 else 541 else
717 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; 542 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
@@ -820,9 +645,6 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
820 645
821 DRM_DEBUG("\n"); 646 DRM_DEBUG("\n");
822 647
823 if (radeon_crtc->crtc_id == 0)
824 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
825
826 if (rdev->is_atom_bios) { 648 if (rdev->is_atom_bios) {
827 radeon_encoder->pixel_clock = adjusted_mode->clock; 649 radeon_encoder->pixel_clock = adjusted_mode->clock;
828 atombios_external_tmds_setup(encoder, ATOM_ENABLE); 650 atombios_external_tmds_setup(encoder, ATOM_ENABLE);
@@ -856,7 +678,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
856 if (radeon_crtc->crtc_id == 0) { 678 if (radeon_crtc->crtc_id == 0) {
857 if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { 679 if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
858 fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; 680 fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
859 if (radeon_encoder->flags & RADEON_USE_RMX) 681 if (radeon_encoder->rmx_type != RMX_OFF)
860 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; 682 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
861 else 683 else
862 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; 684 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
@@ -1014,9 +836,6 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
1014 836
1015 DRM_DEBUG("\n"); 837 DRM_DEBUG("\n");
1016 838
1017 if (radeon_crtc->crtc_id == 0)
1018 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
1019
1020 if (rdev->family != CHIP_R200) { 839 if (rdev->family != CHIP_R200) {
1021 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 840 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1022 if (rdev->family == CHIP_R420 || 841 if (rdev->family == CHIP_R420 ||
@@ -1243,6 +1062,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1243 1062
1244 radeon_encoder->encoder_id = encoder_id; 1063 radeon_encoder->encoder_id = encoder_id;
1245 radeon_encoder->devices = supported_device; 1064 radeon_encoder->devices = supported_device;
1065 radeon_encoder->rmx_type = RMX_OFF;
1246 1066
1247 switch (radeon_encoder->encoder_id) { 1067 switch (radeon_encoder->encoder_id) {
1248 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1068 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9173b687462b..3b09a1f2d8f9 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -36,6 +36,9 @@
36#include <linux/i2c.h> 36#include <linux/i2c.h>
37#include <linux/i2c-id.h> 37#include <linux/i2c-id.h>
38#include <linux/i2c-algo-bit.h> 38#include <linux/i2c-algo-bit.h>
39#include "radeon_fixed.h"
40
41struct radeon_device;
39 42
40#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) 43#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
41#define to_radeon_connector(x) container_of(x, struct radeon_connector, base) 44#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
@@ -124,6 +127,7 @@ struct radeon_tmds_pll {
124#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) 127#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8)
125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 128#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 129#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
127 131
128struct radeon_pll { 132struct radeon_pll {
129 uint16_t reference_freq; 133 uint16_t reference_freq;
@@ -170,6 +174,18 @@ struct radeon_mode_info {
170 struct atom_context *atom_context; 174 struct atom_context *atom_context;
171 enum radeon_connector_table connector_table; 175 enum radeon_connector_table connector_table;
172 bool mode_config_initialized; 176 bool mode_config_initialized;
177 struct radeon_crtc *crtcs[2];
178};
179
180struct radeon_native_mode {
181 /* preferred mode */
182 uint32_t panel_xres, panel_yres;
183 uint32_t hoverplus, hsync_width;
184 uint32_t hblank;
185 uint32_t voverplus, vsync_width;
186 uint32_t vblank;
187 uint32_t dotclock;
188 uint32_t flags;
173}; 189};
174 190
175struct radeon_crtc { 191struct radeon_crtc {
@@ -185,19 +201,13 @@ struct radeon_crtc {
185 uint64_t cursor_addr; 201 uint64_t cursor_addr;
186 int cursor_width; 202 int cursor_width;
187 int cursor_height; 203 int cursor_height;
188}; 204 uint32_t legacy_display_base_addr;
189 205 uint32_t legacy_cursor_offset;
190#define RADEON_USE_RMX 1 206 enum radeon_rmx_type rmx_type;
191 207 uint32_t devices;
192struct radeon_native_mode { 208 fixed20_12 vsc;
193 /* preferred mode */ 209 fixed20_12 hsc;
194 uint32_t panel_xres, panel_yres; 210 struct radeon_native_mode native_mode;
195 uint32_t hoverplus, hsync_width;
196 uint32_t hblank;
197 uint32_t voverplus, vsync_width;
198 uint32_t vblank;
199 uint32_t dotclock;
200 uint32_t flags;
201}; 211};
202 212
203struct radeon_encoder_primary_dac { 213struct radeon_encoder_primary_dac {
@@ -383,16 +393,9 @@ void radeon_enc_destroy(struct drm_encoder *encoder);
383void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 393void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
384void radeon_combios_asic_init(struct drm_device *dev); 394void radeon_combios_asic_init(struct drm_device *dev);
385extern int radeon_static_clocks_init(struct drm_device *dev); 395extern int radeon_static_clocks_init(struct drm_device *dev);
386void radeon_init_disp_bw_legacy(struct drm_device *dev, 396bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
387 struct drm_display_mode *mode1, 397 struct drm_display_mode *mode,
388 uint32_t pixel_bytes1, 398 struct drm_display_mode *adjusted_mode);
389 struct drm_display_mode *mode2, 399void atom_rv515_force_tv_scaler(struct radeon_device *rdev);
390 uint32_t pixel_bytes2);
391void radeon_init_disp_bw_avivo(struct drm_device *dev,
392 struct drm_display_mode *mode1,
393 uint32_t pixel_bytes1,
394 struct drm_display_mode *mode2,
395 uint32_t pixel_bytes2);
396void radeon_init_disp_bandwidth(struct drm_device *dev);
397 400
398#endif 401#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 983e8df5e000..e98cae3bf4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -44,6 +44,9 @@ struct radeon_object {
44 uint64_t gpu_addr; 44 uint64_t gpu_addr;
45 void *kptr; 45 void *kptr;
46 bool is_iomem; 46 bool is_iomem;
47 uint32_t tiling_flags;
48 uint32_t pitch;
49 int surface_reg;
47}; 50};
48 51
49int radeon_ttm_init(struct radeon_device *rdev); 52int radeon_ttm_init(struct radeon_device *rdev);
@@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
70 73
71 robj = container_of(tobj, struct radeon_object, tobj); 74 robj = container_of(tobj, struct radeon_object, tobj);
72 list_del_init(&robj->list); 75 list_del_init(&robj->list);
76 radeon_object_clear_surface_reg(robj);
73 kfree(robj); 77 kfree(robj);
74} 78}
75 79
@@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
99{ 103{
100 uint32_t flags = 0; 104 uint32_t flags = 0;
101 if (domain & RADEON_GEM_DOMAIN_VRAM) { 105 if (domain & RADEON_GEM_DOMAIN_VRAM) {
102 flags |= TTM_PL_FLAG_VRAM; 106 flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
103 } 107 }
104 if (domain & RADEON_GEM_DOMAIN_GTT) { 108 if (domain & RADEON_GEM_DOMAIN_GTT) {
105 flags |= TTM_PL_FLAG_TT; 109 flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
106 } 110 }
107 if (domain & RADEON_GEM_DOMAIN_CPU) { 111 if (domain & RADEON_GEM_DOMAIN_CPU) {
108 flags |= TTM_PL_FLAG_SYSTEM; 112 flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
109 } 113 }
110 if (!flags) { 114 if (!flags) {
111 flags |= TTM_PL_FLAG_SYSTEM; 115 flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
112 } 116 }
113 return flags; 117 return flags;
114} 118}
@@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev,
141 } 145 }
142 robj->rdev = rdev; 146 robj->rdev = rdev;
143 robj->gobj = gobj; 147 robj->gobj = gobj;
148 robj->surface_reg = -1;
144 INIT_LIST_HEAD(&robj->list); 149 INIT_LIST_HEAD(&robj->list);
145 150
146 flags = radeon_object_flags_from_domain(domain); 151 flags = radeon_object_flags_from_domain(domain);
@@ -223,7 +228,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
223{ 228{
224 uint32_t flags; 229 uint32_t flags;
225 uint32_t tmp; 230 uint32_t tmp;
226 void *fbptr;
227 int r; 231 int r;
228 232
229 flags = radeon_object_flags_from_domain(domain); 233 flags = radeon_object_flags_from_domain(domain);
@@ -242,10 +246,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
242 DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); 246 DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
243 return r; 247 return r;
244 } 248 }
245 if (robj->rdev->fbdev_robj == robj) {
246 mutex_lock(&robj->rdev->fbdev_info->lock);
247 radeon_object_kunmap(robj);
248 }
249 tmp = robj->tobj.mem.placement; 249 tmp = robj->tobj.mem.placement;
250 ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); 250 ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
251 robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; 251 robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
@@ -261,23 +261,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
261 DRM_ERROR("radeon: failed to pin object.\n"); 261 DRM_ERROR("radeon: failed to pin object.\n");
262 } 262 }
263 radeon_object_unreserve(robj); 263 radeon_object_unreserve(robj);
264 if (robj->rdev->fbdev_robj == robj) {
265 if (!r) {
266 r = radeon_object_kmap(robj, &fbptr);
267 }
268 if (!r) {
269 robj->rdev->fbdev_info->screen_base = fbptr;
270 robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
271 }
272 mutex_unlock(&robj->rdev->fbdev_info->lock);
273 }
274 return r; 264 return r;
275} 265}
276 266
277void radeon_object_unpin(struct radeon_object *robj) 267void radeon_object_unpin(struct radeon_object *robj)
278{ 268{
279 uint32_t flags; 269 uint32_t flags;
280 void *fbptr;
281 int r; 270 int r;
282 271
283 spin_lock(&robj->tobj.lock); 272 spin_lock(&robj->tobj.lock);
@@ -297,10 +286,6 @@ void radeon_object_unpin(struct radeon_object *robj)
297 DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); 286 DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
298 return; 287 return;
299 } 288 }
300 if (robj->rdev->fbdev_robj == robj) {
301 mutex_lock(&robj->rdev->fbdev_info->lock);
302 radeon_object_kunmap(robj);
303 }
304 flags = robj->tobj.mem.placement; 289 flags = robj->tobj.mem.placement;
305 robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; 290 robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
306 r = ttm_buffer_object_validate(&robj->tobj, 291 r = ttm_buffer_object_validate(&robj->tobj,
@@ -310,16 +295,6 @@ void radeon_object_unpin(struct radeon_object *robj)
310 DRM_ERROR("radeon: failed to unpin buffer.\n"); 295 DRM_ERROR("radeon: failed to unpin buffer.\n");
311 } 296 }
312 radeon_object_unreserve(robj); 297 radeon_object_unreserve(robj);
313 if (robj->rdev->fbdev_robj == robj) {
314 if (!r) {
315 r = radeon_object_kmap(robj, &fbptr);
316 }
317 if (!r) {
318 robj->rdev->fbdev_info->screen_base = fbptr;
319 robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
320 }
321 mutex_unlock(&robj->rdev->fbdev_info->lock);
322 }
323} 298}
324 299
325int radeon_object_wait(struct radeon_object *robj) 300int radeon_object_wait(struct radeon_object *robj)
@@ -334,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj)
334 } 309 }
335 spin_lock(&robj->tobj.lock); 310 spin_lock(&robj->tobj.lock);
336 if (robj->tobj.sync_obj) { 311 if (robj->tobj.sync_obj) {
337 r = ttm_bo_wait(&robj->tobj, true, false, false); 312 r = ttm_bo_wait(&robj->tobj, true, true, false);
338 } 313 }
339 spin_unlock(&robj->tobj.lock); 314 spin_unlock(&robj->tobj.lock);
340 radeon_object_unreserve(robj); 315 radeon_object_unreserve(robj);
@@ -433,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
433 struct radeon_object *robj; 408 struct radeon_object *robj;
434 struct radeon_fence *old_fence = NULL; 409 struct radeon_fence *old_fence = NULL;
435 struct list_head *i; 410 struct list_head *i;
436 uint32_t flags;
437 int r; 411 int r;
438 412
439 r = radeon_object_list_reserve(head); 413 r = radeon_object_list_reserve(head);
@@ -444,27 +418,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
444 list_for_each(i, head) { 418 list_for_each(i, head) {
445 lobj = list_entry(i, struct radeon_object_list, list); 419 lobj = list_entry(i, struct radeon_object_list, list);
446 robj = lobj->robj; 420 robj = lobj->robj;
447 if (lobj->wdomain) {
448 flags = radeon_object_flags_from_domain(lobj->wdomain);
449 flags |= TTM_PL_FLAG_TT;
450 } else {
451 flags = radeon_object_flags_from_domain(lobj->rdomain);
452 flags |= TTM_PL_FLAG_TT;
453 flags |= TTM_PL_FLAG_VRAM;
454 }
455 if (!robj->pin_count) { 421 if (!robj->pin_count) {
456 robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; 422 if (lobj->wdomain) {
423 robj->tobj.proposed_placement =
424 radeon_object_flags_from_domain(lobj->wdomain);
425 } else {
426 robj->tobj.proposed_placement =
427 radeon_object_flags_from_domain(lobj->rdomain);
428 }
457 r = ttm_buffer_object_validate(&robj->tobj, 429 r = ttm_buffer_object_validate(&robj->tobj,
458 robj->tobj.proposed_placement, 430 robj->tobj.proposed_placement,
459 true, false); 431 true, false);
460 if (unlikely(r)) { 432 if (unlikely(r)) {
461 radeon_object_list_unreserve(head);
462 DRM_ERROR("radeon: failed to validate.\n"); 433 DRM_ERROR("radeon: failed to validate.\n");
463 return r; 434 return r;
464 } 435 }
465 radeon_object_gpu_addr(robj); 436 radeon_object_gpu_addr(robj);
466 } 437 }
467 lobj->gpu_offset = robj->gpu_addr; 438 lobj->gpu_offset = robj->gpu_addr;
439 lobj->tiling_flags = robj->tiling_flags;
468 if (fence) { 440 if (fence) {
469 old_fence = (struct radeon_fence *)robj->tobj.sync_obj; 441 old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
470 robj->tobj.sync_obj = radeon_fence_ref(fence); 442 robj->tobj.sync_obj = radeon_fence_ref(fence);
@@ -509,3 +481,127 @@ unsigned long radeon_object_size(struct radeon_object *robj)
509{ 481{
510 return robj->tobj.num_pages << PAGE_SHIFT; 482 return robj->tobj.num_pages << PAGE_SHIFT;
511} 483}
484
485int radeon_object_get_surface_reg(struct radeon_object *robj)
486{
487 struct radeon_device *rdev = robj->rdev;
488 struct radeon_surface_reg *reg;
489 struct radeon_object *old_object;
490 int steal;
491 int i;
492
493 if (!robj->tiling_flags)
494 return 0;
495
496 if (robj->surface_reg >= 0) {
497 reg = &rdev->surface_regs[robj->surface_reg];
498 i = robj->surface_reg;
499 goto out;
500 }
501
502 steal = -1;
503 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
504
505 reg = &rdev->surface_regs[i];
506 if (!reg->robj)
507 break;
508
509 old_object = reg->robj;
510 if (old_object->pin_count == 0)
511 steal = i;
512 }
513
514 /* if we are all out */
515 if (i == RADEON_GEM_MAX_SURFACES) {
516 if (steal == -1)
517 return -ENOMEM;
518 /* find someone with a surface reg and nuke their BO */
519 reg = &rdev->surface_regs[steal];
520 old_object = reg->robj;
521 /* blow away the mapping */
522 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
523 ttm_bo_unmap_virtual(&old_object->tobj);
524 old_object->surface_reg = -1;
525 i = steal;
526 }
527
528 robj->surface_reg = i;
529 reg->robj = robj;
530
531out:
532 radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
533 robj->tobj.mem.mm_node->start << PAGE_SHIFT,
534 robj->tobj.num_pages << PAGE_SHIFT);
535 return 0;
536}
537
538void radeon_object_clear_surface_reg(struct radeon_object *robj)
539{
540 struct radeon_device *rdev = robj->rdev;
541 struct radeon_surface_reg *reg;
542
543 if (robj->surface_reg == -1)
544 return;
545
546 reg = &rdev->surface_regs[robj->surface_reg];
547 radeon_clear_surface_reg(rdev, robj->surface_reg);
548
549 reg->robj = NULL;
550 robj->surface_reg = -1;
551}
552
553void radeon_object_set_tiling_flags(struct radeon_object *robj,
554 uint32_t tiling_flags, uint32_t pitch)
555{
556 robj->tiling_flags = tiling_flags;
557 robj->pitch = pitch;
558}
559
560void radeon_object_get_tiling_flags(struct radeon_object *robj,
561 uint32_t *tiling_flags,
562 uint32_t *pitch)
563{
564 if (tiling_flags)
565 *tiling_flags = robj->tiling_flags;
566 if (pitch)
567 *pitch = robj->pitch;
568}
569
570int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
571 bool force_drop)
572{
573 if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
574 return 0;
575
576 if (force_drop) {
577 radeon_object_clear_surface_reg(robj);
578 return 0;
579 }
580
581 if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
582 if (!has_moved)
583 return 0;
584
585 if (robj->surface_reg >= 0)
586 radeon_object_clear_surface_reg(robj);
587 return 0;
588 }
589
590 if ((robj->surface_reg >= 0) && !has_moved)
591 return 0;
592
593 return radeon_object_get_surface_reg(robj);
594}
595
596void radeon_bo_move_notify(struct ttm_buffer_object *bo,
597 struct ttm_mem_reg *mem)
598{
599 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
600 radeon_object_check_tiling(robj, 0, 1);
601}
602
603void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
604{
605 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
606 radeon_object_check_tiling(robj, 0, 0);
607}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index a853261d1881..60d159308b88 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -126,32 +126,19 @@ static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
126 } 126 }
127} 127}
128 128
129static void radeon_ib_cpu_flush(struct radeon_device *rdev,
130 struct radeon_ib *ib)
131{
132 unsigned long tmp;
133 unsigned i;
134
135 /* To force CPU cache flush ugly but seems reliable */
136 for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
137 tmp = readl(&ib->ptr[i]);
138 }
139}
140
141int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) 129int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
142{ 130{
143 int r = 0; 131 int r = 0;
144 132
145 mutex_lock(&rdev->ib_pool.mutex); 133 mutex_lock(&rdev->ib_pool.mutex);
146 radeon_ib_align(rdev, ib); 134 radeon_ib_align(rdev, ib);
147 radeon_ib_cpu_flush(rdev, ib);
148 if (!ib->length_dw || !rdev->cp.ready) { 135 if (!ib->length_dw || !rdev->cp.ready) {
149 /* TODO: Nothings in the ib we should report. */ 136 /* TODO: Nothings in the ib we should report. */
150 mutex_unlock(&rdev->ib_pool.mutex); 137 mutex_unlock(&rdev->ib_pool.mutex);
151 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
152 return -EINVAL; 139 return -EINVAL;
153 } 140 }
154 /* 64 dwords should be enought for fence too */ 141 /* 64 dwords should be enough for fence too */
155 r = radeon_ring_lock(rdev, 64); 142 r = radeon_ring_lock(rdev, 64);
156 if (r) { 143 if (r) {
157 DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); 144 DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h
new file mode 100644
index 000000000000..63a773578f17
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_share.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_SHARE_H__
29#define __RADEON_SHARE_H__
30
31void r100_vram_init_sizes(struct radeon_device *rdev);
32
33void rs690_line_buffer_adjust(struct radeon_device *rdev,
34 struct drm_display_mode *mode1,
35 struct drm_display_mode *mode2);
36
37void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
38
39#endif
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
new file mode 100644
index 000000000000..03c33cf4e14c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright 2009 VMware, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Michel Dänzer
23 */
24#include <drm/drmP.h>
25#include <drm/radeon_drm.h>
26#include "radeon_reg.h"
27#include "radeon.h"
28
29
30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
31void radeon_test_moves(struct radeon_device *rdev)
32{
33 struct radeon_object *vram_obj = NULL;
34 struct radeon_object **gtt_obj = NULL;
35 struct radeon_fence *fence = NULL;
36 uint64_t gtt_addr, vram_addr;
37 unsigned i, n, size;
38 int r;
39
40 size = 1024 * 1024;
41
42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size
44 */
45 n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 -
46 rdev->cp.ring_size) / size;
47
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
49 if (!gtt_obj) {
50 DRM_ERROR("Failed to allocate %d pointers\n", n);
51 r = 1;
52 goto out_cleanup;
53 }
54
55 r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
56 false, &vram_obj);
57 if (r) {
58 DRM_ERROR("Failed to create VRAM object\n");
59 goto out_cleanup;
60 }
61
62 r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
63 if (r) {
64 DRM_ERROR("Failed to pin VRAM object\n");
65 goto out_cleanup;
66 }
67
68 for (i = 0; i < n; i++) {
69 void *gtt_map, *vram_map;
70 void **gtt_start, **gtt_end;
71 void **vram_start, **vram_end;
72
73 r = radeon_object_create(rdev, NULL, size, true,
74 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
75 if (r) {
76 DRM_ERROR("Failed to create GTT object %d\n", i);
77 goto out_cleanup;
78 }
79
80 r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
81 if (r) {
82 DRM_ERROR("Failed to pin GTT object %d\n", i);
83 goto out_cleanup;
84 }
85
86 r = radeon_object_kmap(gtt_obj[i], &gtt_map);
87 if (r) {
88 DRM_ERROR("Failed to map GTT object %d\n", i);
89 goto out_cleanup;
90 }
91
92 for (gtt_start = gtt_map, gtt_end = gtt_map + size;
93 gtt_start < gtt_end;
94 gtt_start++)
95 *gtt_start = gtt_start;
96
97 radeon_object_kunmap(gtt_obj[i]);
98
99 r = radeon_fence_create(rdev, &fence);
100 if (r) {
101 DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
102 goto out_cleanup;
103 }
104
105 r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence);
106 if (r) {
107 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
108 goto out_cleanup;
109 }
110
111 r = radeon_fence_wait(fence, false);
112 if (r) {
113 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
114 goto out_cleanup;
115 }
116
117 radeon_fence_unref(&fence);
118
119 r = radeon_object_kmap(vram_obj, &vram_map);
120 if (r) {
121 DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
122 goto out_cleanup;
123 }
124
125 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
126 vram_start = vram_map, vram_end = vram_map + size;
127 vram_start < vram_end;
128 gtt_start++, vram_start++) {
129 if (*vram_start != gtt_start) {
130 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
131 "expected 0x%p (GTT map 0x%p-0x%p)\n",
132 i, *vram_start, gtt_start, gtt_map,
133 gtt_end);
134 radeon_object_kunmap(vram_obj);
135 goto out_cleanup;
136 }
137 *vram_start = vram_start;
138 }
139
140 radeon_object_kunmap(vram_obj);
141
142 r = radeon_fence_create(rdev, &fence);
143 if (r) {
144 DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
145 goto out_cleanup;
146 }
147
148 r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence);
149 if (r) {
150 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
151 goto out_cleanup;
152 }
153
154 r = radeon_fence_wait(fence, false);
155 if (r) {
156 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
157 goto out_cleanup;
158 }
159
160 radeon_fence_unref(&fence);
161
162 r = radeon_object_kmap(gtt_obj[i], &gtt_map);
163 if (r) {
164 DRM_ERROR("Failed to map GTT object after copy %d\n", i);
165 goto out_cleanup;
166 }
167
168 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
169 vram_start = vram_map, vram_end = vram_map + size;
170 gtt_start < gtt_end;
171 gtt_start++, vram_start++) {
172 if (*gtt_start != vram_start) {
173 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
174 "expected 0x%p (VRAM map 0x%p-0x%p)\n",
175 i, *gtt_start, vram_start, vram_map,
176 vram_end);
177 radeon_object_kunmap(gtt_obj[i]);
178 goto out_cleanup;
179 }
180 }
181
182 radeon_object_kunmap(gtt_obj[i]);
183
184 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
185 gtt_addr - rdev->mc.gtt_location);
186 }
187
188out_cleanup:
189 if (vram_obj) {
190 radeon_object_unpin(vram_obj);
191 radeon_object_unref(&vram_obj);
192 }
193 if (gtt_obj) {
194 for (i = 0; i < n; i++) {
195 if (gtt_obj[i]) {
196 radeon_object_unpin(gtt_obj[i]);
197 radeon_object_unref(&gtt_obj[i]);
198 }
199 }
200 kfree(gtt_obj);
201 }
202 if (fence) {
203 radeon_fence_unref(&fence);
204 }
205 if (r) {
206 printk(KERN_WARNING "Error while testing BO move.\n");
207 }
208}
209
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1227a97f5169..15c3531377ed 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -355,23 +355,26 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
355 if (!rdev->cp.ready) { 355 if (!rdev->cp.ready) {
356 /* use memcpy */ 356 /* use memcpy */
357 DRM_ERROR("CP is not ready use memcpy.\n"); 357 DRM_ERROR("CP is not ready use memcpy.\n");
358 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 358 goto memcpy;
359 } 359 }
360 360
361 if (old_mem->mem_type == TTM_PL_VRAM && 361 if (old_mem->mem_type == TTM_PL_VRAM &&
362 new_mem->mem_type == TTM_PL_SYSTEM) { 362 new_mem->mem_type == TTM_PL_SYSTEM) {
363 return radeon_move_vram_ram(bo, evict, interruptible, 363 r = radeon_move_vram_ram(bo, evict, interruptible,
364 no_wait, new_mem); 364 no_wait, new_mem);
365 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 365 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
366 new_mem->mem_type == TTM_PL_VRAM) { 366 new_mem->mem_type == TTM_PL_VRAM) {
367 return radeon_move_ram_vram(bo, evict, interruptible, 367 r = radeon_move_ram_vram(bo, evict, interruptible,
368 no_wait, new_mem); 368 no_wait, new_mem);
369 } else { 369 } else {
370 r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); 370 r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
371 if (unlikely(r)) {
372 return r;
373 }
374 } 371 }
372
373 if (r) {
374memcpy:
375 r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
376 }
377
375 return r; 378 return r;
376} 379}
377 380
@@ -429,6 +432,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
429 .sync_obj_flush = &radeon_sync_obj_flush, 432 .sync_obj_flush = &radeon_sync_obj_flush,
430 .sync_obj_unref = &radeon_sync_obj_unref, 433 .sync_obj_unref = &radeon_sync_obj_unref,
431 .sync_obj_ref = &radeon_sync_obj_ref, 434 .sync_obj_ref = &radeon_sync_obj_ref,
435 .move_notify = &radeon_bo_move_notify,
436 .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
432}; 437};
433 438
434int radeon_ttm_init(struct radeon_device *rdev) 439int radeon_ttm_init(struct radeon_device *rdev)
@@ -442,13 +447,14 @@ int radeon_ttm_init(struct radeon_device *rdev)
 442 /* No other users of the address space, so set it to 0 */ 447
443 r = ttm_bo_device_init(&rdev->mman.bdev, 448 r = ttm_bo_device_init(&rdev->mman.bdev,
444 rdev->mman.mem_global_ref.object, 449 rdev->mman.mem_global_ref.object,
445 &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); 450 &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
451 rdev->need_dma32);
446 if (r) { 452 if (r) {
447 DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 453 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
448 return r; 454 return r;
449 } 455 }
450 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, 456 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
451 ((rdev->mc.aper_size) >> PAGE_SHIFT)); 457 ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
452 if (r) { 458 if (r) {
453 DRM_ERROR("Failed initializing VRAM heap.\n"); 459 DRM_ERROR("Failed initializing VRAM heap.\n");
454 return r; 460 return r;
@@ -465,7 +471,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
465 return r; 471 return r;
466 } 472 }
467 DRM_INFO("radeon: %uM of VRAM memory ready\n", 473 DRM_INFO("radeon: %uM of VRAM memory ready\n",
468 rdev->mc.vram_size / (1024 * 1024)); 474 rdev->mc.real_vram_size / (1024 * 1024));
469 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, 475 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
470 ((rdev->mc.gtt_size) >> PAGE_SHIFT)); 476 ((rdev->mc.gtt_size) >> PAGE_SHIFT));
471 if (r) { 477 if (r) {
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index cc074b5a8f74..b29affd9c5d8 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -29,6 +29,7 @@
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include "radeon_reg.h" 30#include "radeon_reg.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_share.h"
32 33
33/* rs400,rs480 depends on : */ 34/* rs400,rs480 depends on : */
34void r100_hdp_reset(struct radeon_device *rdev); 35void r100_hdp_reset(struct radeon_device *rdev);
@@ -164,7 +165,9 @@ int rs400_gart_enable(struct radeon_device *rdev)
164 WREG32(RADEON_BUS_CNTL, tmp); 165 WREG32(RADEON_BUS_CNTL, tmp);
165 } 166 }
166 /* Table should be in 32bits address space so ignore bits above. */ 167 /* Table should be in 32bits address space so ignore bits above. */
167 tmp = rdev->gart.table_addr & 0xfffff000; 168 tmp = (u32)rdev->gart.table_addr & 0xfffff000;
169 tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
170
168 WREG32_MC(RS480_GART_BASE, tmp); 171 WREG32_MC(RS480_GART_BASE, tmp);
169 /* TODO: more tweaking here */ 172 /* TODO: more tweaking here */
170 WREG32_MC(RS480_GART_FEATURE_ID, 173 WREG32_MC(RS480_GART_FEATURE_ID,
@@ -201,10 +204,17 @@ void rs400_gart_disable(struct radeon_device *rdev)
201 204
202int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 205int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
203{ 206{
207 uint32_t entry;
208
204 if (i < 0 || i > rdev->gart.num_gpu_pages) { 209 if (i < 0 || i > rdev->gart.num_gpu_pages) {
205 return -EINVAL; 210 return -EINVAL;
206 } 211 }
207 rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); 212
213 entry = (lower_32_bits(addr) & PAGE_MASK) |
214 ((upper_32_bits(addr) & 0xff) << 4) |
215 0xc;
216 entry = cpu_to_le32(entry);
217 rdev->gart.table.ram.ptr[i] = entry;
208 return 0; 218 return 0;
209} 219}
210 220
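The new rs400_gart_set_page() layout above packs an up-to-40-bit bus address into a single 32-bit GART entry. A minimal userspace sketch of the same packing, assuming 4 KiB GPU pages and treating the low 0xc bits as driver flag bits (their exact meaning is not spelled out in the patch); the kernel additionally byte-swaps the result with cpu_to_le32():

    #include <stdint.h>
    #include <stdio.h>

    #define RS400_PAGE_MASK 0xfffff000u   /* assumes 4 KiB GPU pages */

    /* bits 31:12 carry the low page address, bits 11:4 carry address
     * bits 39:32, and bits 3:2 (0xc) are flag bits set by the driver. */
    static uint32_t rs400_gart_entry(uint64_t addr)
    {
        uint32_t lo = (uint32_t)addr & RS400_PAGE_MASK;
        uint32_t hi = ((uint32_t)(addr >> 32) & 0xff) << 4;

        return lo | hi | 0xc;
    }

    int main(void)
    {
        uint64_t addr = 0x12ab345000ULL;  /* a page above 4 GiB */

        printf("entry = 0x%08x\n", rs400_gart_entry(addr));
        return 0;
    }
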
@@ -223,10 +233,9 @@ int rs400_mc_init(struct radeon_device *rdev)
223 233
224 rs400_gpu_init(rdev); 234 rs400_gpu_init(rdev);
225 rs400_gart_disable(rdev); 235 rs400_gart_disable(rdev);
226 rdev->mc.gtt_location = rdev->mc.vram_size; 236 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
227 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); 237 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
228 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); 238 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
229 rdev->mc.vram_location = 0xFFFFFFFFUL;
230 r = radeon_mc_setup(rdev); 239 r = radeon_mc_setup(rdev);
231 if (r) { 240 if (r) {
232 return r; 241 return r;
@@ -238,7 +247,7 @@ int rs400_mc_init(struct radeon_device *rdev)
238 "programming pipes. Bad things might happen.\n"); 247 "programming pipes. Bad things might happen.\n");
239 } 248 }
240 249
241 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 250 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
242 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); 251 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
243 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); 252 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
244 WREG32(RADEON_MC_FB_LOCATION, tmp); 253 WREG32(RADEON_MC_FB_LOCATION, tmp);
@@ -284,21 +293,12 @@ void rs400_gpu_init(struct radeon_device *rdev)
284 */ 293 */
285void rs400_vram_info(struct radeon_device *rdev) 294void rs400_vram_info(struct radeon_device *rdev)
286{ 295{
287 uint32_t tom;
288
289 rs400_gart_adjust_size(rdev); 296 rs400_gart_adjust_size(rdev);
290 /* DDR for all card after R300 & IGP */ 297 /* DDR for all card after R300 & IGP */
291 rdev->mc.vram_is_ddr = true; 298 rdev->mc.vram_is_ddr = true;
292 rdev->mc.vram_width = 128; 299 rdev->mc.vram_width = 128;
293 300
294 /* read NB_TOM to get the amount of ram stolen for the GPU */ 301 r100_vram_init_sizes(rdev);
295 tom = RREG32(RADEON_NB_TOM);
296 rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
297 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
298
299 /* Could aper size report 0 ? */
300 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
301 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
302} 302}
303 303
304 304
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index ab0c967553e6..bbea6dee4a94 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -223,7 +223,7 @@ int rs600_mc_init(struct radeon_device *rdev)
223 printk(KERN_WARNING "Failed to wait MC idle while " 223 printk(KERN_WARNING "Failed to wait MC idle while "
224 "programming pipes. Bad things might happen.\n"); 224 "programming pipes. Bad things might happen.\n");
225 } 225 }
226 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 226 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
227 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); 227 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
228 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); 228 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
229 WREG32_MC(RS600_MC_FB_LOCATION, tmp); 229 WREG32_MC(RS600_MC_FB_LOCATION, tmp);
@@ -301,6 +301,11 @@ void rs600_vram_info(struct radeon_device *rdev)
301 rdev->mc.vram_width = 128; 301 rdev->mc.vram_width = 128;
302} 302}
303 303
304void rs600_bandwidth_update(struct radeon_device *rdev)
305{
306 /* FIXME: implement, should this be like rs690 ? */
307}
308
304 309
305/* 310/*
306 * Indirect registers accessor 311 * Indirect registers accessor
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 79ba85042b5f..839595b00728 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -28,6 +28,9 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "rs690r.h"
32#include "atom.h"
33#include "atom-bits.h"
31 34
32/* rs690,rs740 depends on : */ 35/* rs690,rs740 depends on : */
33void r100_hdp_reset(struct radeon_device *rdev); 36void r100_hdp_reset(struct radeon_device *rdev);
@@ -64,7 +67,7 @@ int rs690_mc_init(struct radeon_device *rdev)
64 rs400_gart_disable(rdev); 67 rs400_gart_disable(rdev);
65 68
66 /* Setup GPU memory space */ 69 /* Setup GPU memory space */
67 rdev->mc.gtt_location = rdev->mc.vram_size; 70 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
68 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); 71 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
69 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); 72 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
70 rdev->mc.vram_location = 0xFFFFFFFFUL; 73 rdev->mc.vram_location = 0xFFFFFFFFUL;
@@ -79,7 +82,7 @@ int rs690_mc_init(struct radeon_device *rdev)
79 printk(KERN_WARNING "Failed to wait MC idle while " 82 printk(KERN_WARNING "Failed to wait MC idle while "
80 "programming pipes. Bad things might happen.\n"); 83 "programming pipes. Bad things might happen.\n");
81 } 84 }
82 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 85 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
83 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); 86 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
84 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); 87 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
85 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); 88 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
@@ -138,9 +141,82 @@ void rs690_gpu_init(struct radeon_device *rdev)
138/* 141/*
139 * VRAM info. 142 * VRAM info.
140 */ 143 */
144void rs690_pm_info(struct radeon_device *rdev)
145{
146 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
147 struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
148 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
149 void *ptr;
150 uint16_t data_offset;
151 uint8_t frev, crev;
152 fixed20_12 tmp;
153
154 atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
155 &frev, &crev, &data_offset);
156 ptr = rdev->mode_info.atom_context->bios + data_offset;
157 info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
158 info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
159 /* Get various system information from the BIOS */
160 switch (crev) {
161 case 1:
162 tmp.full = rfixed_const(100);
163 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
164 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
165 rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
166 rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
167 rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
168 break;
169 case 2:
170 tmp.full = rfixed_const(100);
171 rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
172 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
173 rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
174 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
175 rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
176 rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
177 rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
178 break;
179 default:
180 tmp.full = rfixed_const(100);
181 /* We assume the slowest possible clock, i.e. worst case */
182 /* DDR 333MHz */
183 rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
184 /* FIXME: system clock ? */
185 rdev->pm.igp_system_mclk.full = rfixed_const(100);
186 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
187 rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
188 rdev->pm.igp_ht_link_width.full = rfixed_const(8);
189 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
190 break;
191 }
192 /* Compute various bandwidths */
193 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
194 tmp.full = rfixed_const(4);
195 rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
196 /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
197 * = ht_clk * ht_width / 5
198 */
199 tmp.full = rfixed_const(5);
200 rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
201 rdev->pm.igp_ht_link_width);
202 rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
203 if (tmp.full < rdev->pm.max_bandwidth.full) {
204 /* HT link is a limiting factor */
205 rdev->pm.max_bandwidth.full = tmp.full;
206 }
207 /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
208 * = (sideport_clk * 14) / 10
209 */
210 tmp.full = rfixed_const(14);
211 rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
212 tmp.full = rfixed_const(10);
213 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
214}
215
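For a feel of the scale of these figures, the snippet below plugs illustrative clocks into the bandwidth formulas from the comments above, using plain floating point instead of the driver's fixed20_12 type; the clock values are made up, not read from any board:

    #include <stdio.h>

    int main(void)
    {
        double system_mclk = 400.0;    /* MHz */
        double ht_clk = 1000.0;        /* MHz */
        double ht_width = 16.0;        /* bits */
        double sideport_mclk = 333.0;  /* MHz */

        double k8_bw = system_mclk * 4.0;            /* memory_clk * 4 */
        double ht_bw = ht_clk * ht_width / 5.0;      /* ht_clk * ht_width / 5 */
        double sp_bw = sideport_mclk * 14.0 / 10.0;  /* sideport_clk * 14 / 10 */

        printf("k8_bandwidth       = %.0f\n", k8_bw);  /* 1600 */
        printf("ht_bandwidth       = %.0f\n", ht_bw);  /* 3200 */
        printf("sideport_bandwidth = %.1f\n", sp_bw);  /* 466.2 */
        return 0;
    }
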
141void rs690_vram_info(struct radeon_device *rdev) 216void rs690_vram_info(struct radeon_device *rdev)
142{ 217{
143 uint32_t tmp; 218 uint32_t tmp;
219 fixed20_12 a;
144 220
145 rs400_gart_adjust_size(rdev); 221 rs400_gart_adjust_size(rdev);
146 /* DDR for all card after R300 & IGP */ 222 /* DDR for all card after R300 & IGP */
@@ -152,12 +228,409 @@ void rs690_vram_info(struct radeon_device *rdev)
152 } else { 228 } else {
153 rdev->mc.vram_width = 64; 229 rdev->mc.vram_width = 64;
154 } 230 }
155 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 231 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
232 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
156 233
157 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 234 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
158 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 235 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
236 rs690_pm_info(rdev);
237 /* FIXME: we should enforce default clock in case GPU is not in
238 * default setup
239 */
240 a.full = rfixed_const(100);
241 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
242 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
243 a.full = rfixed_const(16);
244 /* core_bandwidth = sclk(Mhz) * 16 */
245 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
246}
247
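The sclk and core_bandwidth computations above are the first use of the fixed20_12 type in this file. A rough userspace model of that 20.12 fixed-point format, assuming the value is simply stored scaled by 2^12 (the kernel's own helpers in radeon_fixed.h may round differently), and assuming default_sclk is in 10 kHz units as the divide-by-100 suggests:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t full; } fixed20_12;

    static fixed20_12 fx_const(uint32_t a)   { return (fixed20_12){ a << 12 }; }
    static uint32_t   fx_trunc(fixed20_12 a) { return a.full >> 12; }

    static fixed20_12 fx_mul(fixed20_12 a, fixed20_12 b)
    {
        return (fixed20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
    }

    static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
    {
        return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
    }

    int main(void)
    {
        /* 50000 / 100 = 500: e.g. a default_sclk of 50000 (10 kHz units) -> 500 MHz */
        fixed20_12 sclk = fx_div(fx_const(50000), fx_const(100));
        /* products such as memory_clk * 4 above use rfixed_mul the same way */
        fixed20_12 x    = fx_mul(sclk, fx_const(4));

        printf("sclk = %u, sclk*4 = %u\n", fx_trunc(sclk), fx_trunc(x));
        return 0;
    }
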
248void rs690_line_buffer_adjust(struct radeon_device *rdev,
249 struct drm_display_mode *mode1,
250 struct drm_display_mode *mode2)
251{
252 u32 tmp;
253
254 /*
255 * Line Buffer Setup
256 * There is a single line buffer shared by both display controllers.
257 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
258 * the display controllers. The partitioning can either be done
259 * manually or via one of four preset allocations specified in bits 1:0:
260 * 0 - line buffer is divided in half and shared between crtc
261 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
262 * 2 - D1 gets the whole buffer
263 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
264 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual
265 * allocation mode. In manual allocation mode, D1 always starts at 0,
266 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
267 */
268 tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
269 tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
270 /* auto */
271 if (mode1 && mode2) {
272 if (mode1->hdisplay > mode2->hdisplay) {
273 if (mode1->hdisplay > 2560)
274 tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
275 else
276 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
277 } else if (mode2->hdisplay > mode1->hdisplay) {
278 if (mode2->hdisplay > 2560)
279 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
280 else
281 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
282 } else
283 tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
284 } else if (mode1) {
285 tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
286 } else if (mode2) {
287 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
288 }
289 WREG32(DC_LB_MEMORY_SPLIT, tmp);
159} 290}
160 291
292struct rs690_watermark {
293 u32 lb_request_fifo_depth;
294 fixed20_12 num_line_pair;
295 fixed20_12 estimated_width;
296 fixed20_12 worst_case_latency;
297 fixed20_12 consumption_rate;
298 fixed20_12 active_time;
299 fixed20_12 dbpp;
300 fixed20_12 priority_mark_max;
301 fixed20_12 priority_mark;
302 fixed20_12 sclk;
303};
304
305void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
306 struct radeon_crtc *crtc,
307 struct rs690_watermark *wm)
308{
309 struct drm_display_mode *mode = &crtc->base.mode;
310 fixed20_12 a, b, c;
311 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
312 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
313 /* FIXME: detect IGP with sideport memory, I don't think there is any
314 * such product available
315 */
316 bool sideport = false;
317
318 if (!crtc->base.enabled) {
319 /* FIXME: wouldn't it be better to set priority mark to maximum */
320 wm->lb_request_fifo_depth = 4;
321 return;
322 }
323
324 if (crtc->vsc.full > rfixed_const(2))
325 wm->num_line_pair.full = rfixed_const(2);
326 else
327 wm->num_line_pair.full = rfixed_const(1);
328
329 b.full = rfixed_const(mode->crtc_hdisplay);
330 c.full = rfixed_const(256);
331 a.full = rfixed_mul(wm->num_line_pair, b);
332 request_fifo_depth.full = rfixed_div(a, c);
333 if (a.full < rfixed_const(4)) {
334 wm->lb_request_fifo_depth = 4;
335 } else {
336 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
337 }
338
339 /* Determine consumption rate
340 * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
341 * vtaps = number of vertical taps,
342 * vsc = vertical scaling ratio, defined as source/destination
343 * hsc = horizontal scaling ratio, defined as source/destination
344 */
345 a.full = rfixed_const(mode->clock);
346 b.full = rfixed_const(1000);
347 a.full = rfixed_div(a, b);
348 pclk.full = rfixed_div(b, a);
349 if (crtc->rmx_type != RMX_OFF) {
350 b.full = rfixed_const(2);
351 if (crtc->vsc.full > b.full)
352 b.full = crtc->vsc.full;
353 b.full = rfixed_mul(b, crtc->hsc);
354 c.full = rfixed_const(2);
355 b.full = rfixed_div(b, c);
356 consumption_time.full = rfixed_div(pclk, b);
357 } else {
358 consumption_time.full = pclk.full;
359 }
360 a.full = rfixed_const(1);
361 wm->consumption_rate.full = rfixed_div(a, consumption_time);
362
363
364 /* Determine line time
365 * LineTime = total time for one line of display
366 * htotal = total number of horizontal pixels
367 * pclk = pixel clock period(ns)
368 */
369 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
370 line_time.full = rfixed_mul(a, pclk);
371
372 /* Determine active time
373 * ActiveTime = time of active region of display within one line,
374 * hactive = total number of horizontal active pixels
375 * htotal = total number of horizontal pixels
376 */
377 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
378 b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
379 wm->active_time.full = rfixed_mul(line_time, b);
380 wm->active_time.full = rfixed_div(wm->active_time, a);
381
382 /* Maximum bandwidth is the minimum bandwidth of all components */
383 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
384 if (sideport) {
385 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
386 rdev->pm.sideport_bandwidth.full)
387 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
388 read_delay_latency.full = rfixed_const(370 * 800 * 1000);
389 read_delay_latency.full = rfixed_div(read_delay_latency,
390 rdev->pm.igp_sideport_mclk);
391 } else {
392 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
393 rdev->pm.k8_bandwidth.full)
394 rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
395 if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
396 rdev->pm.ht_bandwidth.full)
397 rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
398 read_delay_latency.full = rfixed_const(5000);
399 }
400
401 /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
402 a.full = rfixed_const(16);
403 rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
404 a.full = rfixed_const(1000);
405 rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
406 /* Determine chunk time
407 * ChunkTime = the time it takes the DCP to send one chunk of data
408 * to the LB which consists of pipeline delay and inter chunk gap
409 * sclk = system clock(ns)
410 */
411 a.full = rfixed_const(256 * 13);
412 chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
413 a.full = rfixed_const(10);
414 chunk_time.full = rfixed_div(chunk_time, a);
415
416 /* Determine the worst case latency
417 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
418 * WorstCaseLatency = worst case time from urgent to when the MC starts
419 * to return data
420 * READ_DELAY_IDLE_MAX = constant of 1us
421 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
422 * which consists of pipeline delay and inter chunk gap
423 */
424 if (rfixed_trunc(wm->num_line_pair) > 1) {
425 a.full = rfixed_const(3);
426 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
427 wm->worst_case_latency.full += read_delay_latency.full;
428 } else {
429 a.full = rfixed_const(2);
430 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
431 wm->worst_case_latency.full += read_delay_latency.full;
432 }
433
434 /* Determine the tolerable latency
435 * TolerableLatency = Any given request has only 1 line time
436 * for the data to be returned
437 * LBRequestFifoDepth = Number of chunk requests the LB can
438 * put into the request FIFO for a display
439 * LineTime = total time for one line of display
440 * ChunkTime = the time it takes the DCP to send one chunk
441 * of data to the LB which consists of
442 * pipeline delay and inter chunk gap
443 */
444 if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
445 tolerable_latency.full = line_time.full;
446 } else {
447 tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
448 tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
449 tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
450 tolerable_latency.full = line_time.full - tolerable_latency.full;
451 }
452 /* We assume worst case 32bits (4 bytes) */
453 wm->dbpp.full = rfixed_const(4 * 8);
454
455 /* Determine the maximum priority mark
456 * width = viewport width in pixels
457 */
458 a.full = rfixed_const(16);
459 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
460 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
461
462 /* Determine estimated width */
463 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
464 estimated_width.full = rfixed_div(estimated_width, consumption_time);
465 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
466 wm->priority_mark.full = rfixed_const(10);
467 } else {
468 a.full = rfixed_const(16);
469 wm->priority_mark.full = rfixed_div(estimated_width, a);
470 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
471 }
472}
473
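A worked example of the consumption-rate and line-time formulas above, for an illustrative 1920x1200 mode with a 154 MHz pixel clock and an htotal of 2080, without any scaler (so the consumption time equals the pixel clock period):

    #include <stdio.h>

    int main(void)
    {
        double clock_khz = 154000.0;
        double hdisplay = 1920.0, htotal = 2080.0;

        double pclk_ns = 1000.0 / (clock_khz / 1000.0);            /* ~6.49 ns */
        double line_time_ns = htotal * pclk_ns;                    /* ~13507 ns */
        double active_time_ns = line_time_ns * hdisplay / htotal;  /* ~12468 ns */
        double consumption_rate = 1.0 / pclk_ns;                   /* pixels per ns */

        printf("pclk        = %.2f ns\n", pclk_ns);
        printf("line time   = %.0f ns\n", line_time_ns);
        printf("active time = %.0f ns\n", active_time_ns);
        printf("consumption = %.3f pixel/ns\n", consumption_rate);
        return 0;
    }
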
474void rs690_bandwidth_update(struct radeon_device *rdev)
475{
476 struct drm_display_mode *mode0 = NULL;
477 struct drm_display_mode *mode1 = NULL;
478 struct rs690_watermark wm0;
479 struct rs690_watermark wm1;
480 u32 tmp;
481 fixed20_12 priority_mark02, priority_mark12, fill_rate;
482 fixed20_12 a, b;
483
484 if (rdev->mode_info.crtcs[0]->base.enabled)
485 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
486 if (rdev->mode_info.crtcs[1]->base.enabled)
487 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
488 /*
489 * Set display0/1 priority up in the memory controller for
490 * modes if the user specifies HIGH for displaypriority
491 * option.
492 */
493 if (rdev->disp_priority == 2) {
494 tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
495 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
496 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
497 if (mode1)
498 tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
499 if (mode0)
500 tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
501 WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
502 }
503 rs690_line_buffer_adjust(rdev, mode0, mode1);
504
505 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
506 WREG32(DCP_CONTROL, 0);
507 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
508 WREG32(DCP_CONTROL, 2);
509
510 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
511 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
512
513 tmp = (wm0.lb_request_fifo_depth - 1);
514 tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
515 WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
516
517 if (mode0 && mode1) {
518 if (rfixed_trunc(wm0.dbpp) > 64)
519 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
520 else
521 a.full = wm0.num_line_pair.full;
522 if (rfixed_trunc(wm1.dbpp) > 64)
523 b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
524 else
525 b.full = wm1.num_line_pair.full;
526 a.full += b.full;
527 fill_rate.full = rfixed_div(wm0.sclk, a);
528 if (wm0.consumption_rate.full > fill_rate.full) {
529 b.full = wm0.consumption_rate.full - fill_rate.full;
530 b.full = rfixed_mul(b, wm0.active_time);
531 a.full = rfixed_mul(wm0.worst_case_latency,
532 wm0.consumption_rate);
533 a.full = a.full + b.full;
534 b.full = rfixed_const(16 * 1000);
535 priority_mark02.full = rfixed_div(a, b);
536 } else {
537 a.full = rfixed_mul(wm0.worst_case_latency,
538 wm0.consumption_rate);
539 b.full = rfixed_const(16 * 1000);
540 priority_mark02.full = rfixed_div(a, b);
541 }
542 if (wm1.consumption_rate.full > fill_rate.full) {
543 b.full = wm1.consumption_rate.full - fill_rate.full;
544 b.full = rfixed_mul(b, wm1.active_time);
545 a.full = rfixed_mul(wm1.worst_case_latency,
546 wm1.consumption_rate);
547 a.full = a.full + b.full;
548 b.full = rfixed_const(16 * 1000);
549 priority_mark12.full = rfixed_div(a, b);
550 } else {
551 a.full = rfixed_mul(wm1.worst_case_latency,
552 wm1.consumption_rate);
553 b.full = rfixed_const(16 * 1000);
554 priority_mark12.full = rfixed_div(a, b);
555 }
556 if (wm0.priority_mark.full > priority_mark02.full)
557 priority_mark02.full = wm0.priority_mark.full;
558 if (rfixed_trunc(priority_mark02) < 0)
559 priority_mark02.full = 0;
560 if (wm0.priority_mark_max.full > priority_mark02.full)
561 priority_mark02.full = wm0.priority_mark_max.full;
562 if (wm1.priority_mark.full > priority_mark12.full)
563 priority_mark12.full = wm1.priority_mark.full;
564 if (rfixed_trunc(priority_mark12) < 0)
565 priority_mark12.full = 0;
566 if (wm1.priority_mark_max.full > priority_mark12.full)
567 priority_mark12.full = wm1.priority_mark_max.full;
568 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
569 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
570 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
571 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
572 } else if (mode0) {
573 if (rfixed_trunc(wm0.dbpp) > 64)
574 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
575 else
576 a.full = wm0.num_line_pair.full;
577 fill_rate.full = rfixed_div(wm0.sclk, a);
578 if (wm0.consumption_rate.full > fill_rate.full) {
579 b.full = wm0.consumption_rate.full - fill_rate.full;
580 b.full = rfixed_mul(b, wm0.active_time);
581 a.full = rfixed_mul(wm0.worst_case_latency,
582 wm0.consumption_rate);
583 a.full = a.full + b.full;
584 b.full = rfixed_const(16 * 1000);
585 priority_mark02.full = rfixed_div(a, b);
586 } else {
587 a.full = rfixed_mul(wm0.worst_case_latency,
588 wm0.consumption_rate);
589 b.full = rfixed_const(16 * 1000);
590 priority_mark02.full = rfixed_div(a, b);
591 }
592 if (wm0.priority_mark.full > priority_mark02.full)
593 priority_mark02.full = wm0.priority_mark.full;
594 if (rfixed_trunc(priority_mark02) < 0)
595 priority_mark02.full = 0;
596 if (wm0.priority_mark_max.full > priority_mark02.full)
597 priority_mark02.full = wm0.priority_mark_max.full;
598 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
599 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
600 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
601 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
602 } else {
603 if (rfixed_trunc(wm1.dbpp) > 64)
604 a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
605 else
606 a.full = wm1.num_line_pair.full;
607 fill_rate.full = rfixed_div(wm1.sclk, a);
608 if (wm1.consumption_rate.full > fill_rate.full) {
609 b.full = wm1.consumption_rate.full - fill_rate.full;
610 b.full = rfixed_mul(b, wm1.active_time);
611 a.full = rfixed_mul(wm1.worst_case_latency,
612 wm1.consumption_rate);
613 a.full = a.full + b.full;
614 b.full = rfixed_const(16 * 1000);
615 priority_mark12.full = rfixed_div(a, b);
616 } else {
617 a.full = rfixed_mul(wm1.worst_case_latency,
618 wm1.consumption_rate);
619 b.full = rfixed_const(16 * 1000);
620 priority_mark12.full = rfixed_div(a, b);
621 }
622 if (wm1.priority_mark.full > priority_mark12.full)
623 priority_mark12.full = wm1.priority_mark.full;
624 if (rfixed_trunc(priority_mark12) < 0)
625 priority_mark12.full = 0;
626 if (wm1.priority_mark_max.full > priority_mark12.full)
627 priority_mark12.full = wm1.priority_mark_max.full;
628 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
629 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
630 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
631 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
632 }
633}
161 634
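Before the clamping against priority_mark and priority_mark_max, the value programmed into the D1MODE/D2MODE priority registers above boils down to (worst_case_latency * consumption_rate + max(0, consumption_rate - fill_rate) * active_time) / (16 * 1000). The sample numbers below are invented merely to show the scale of the result:

    #include <stdio.h>

    int main(void)
    {
        double worst_case_latency_ns = 8000.0;
        double consumption_rate = 0.154;   /* pixel/ns, 154 MHz pixel clock */
        double fill_rate = 0.120;          /* pixel/ns */
        double active_time_ns = 12468.0;

        double mark = (worst_case_latency_ns * consumption_rate +
                       (consumption_rate - fill_rate) * active_time_ns) /
                      (16.0 * 1000.0);

        printf("priority mark ~= %.1f\n", mark);   /* ~0.1 */
        return 0;
    }
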
162/* 635/*
163 * Indirect registers accessor 636 * Indirect registers accessor
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h
new file mode 100644
index 000000000000..c0d9faa2175b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690r.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef RS690R_H
29#define RS690R_H
30
31/* RS690/RS740 registers */
32#define MC_INDEX 0x0078
33# define MC_INDEX_MASK 0x1FF
34# define MC_INDEX_WR_EN (1 << 9)
35# define MC_INDEX_WR_ACK 0x7F
36#define MC_DATA 0x007C
37#define HDP_FB_LOCATION 0x0134
38#define DC_LB_MEMORY_SPLIT 0x6520
39#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
40#define DC_LB_MEMORY_SPLIT_SHIFT 0
41#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
42#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
43#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
44#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
45#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
46#define DC_LB_DISP1_END_ADR_SHIFT 4
47#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
48#define D1MODE_PRIORITY_A_CNT 0x6548
49#define MODE_PRIORITY_MARK_MASK 0x00007FFF
50#define MODE_PRIORITY_OFF (1 << 16)
51#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
52#define MODE_PRIORITY_FORCE_MASK (1 << 24)
53#define D1MODE_PRIORITY_B_CNT 0x654C
54#define LB_MAX_REQ_OUTSTANDING 0x6D58
55#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
56#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
57#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
58#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
59#define DCP_CONTROL 0x6C9C
60#define D2MODE_PRIORITY_A_CNT 0x6D48
61#define D2MODE_PRIORITY_B_CNT 0x6D4C
62
63/* MC indirect registers */
64#define MC_STATUS_IDLE (1 << 0)
65#define MC_MISC_CNTL 0x18
66#define DISABLE_GTW (1 << 1)
67#define GART_INDEX_REG_EN (1 << 12)
68#define BLOCK_GFX_D3_EN (1 << 14)
69#define GART_FEATURE_ID 0x2B
70#define HANG_EN (1 << 11)
71#define TLB_ENABLE (1 << 18)
72#define P2P_ENABLE (1 << 19)
73#define GTW_LAC_EN (1 << 25)
74#define LEVEL2_GART (0 << 30)
75#define LEVEL1_GART (1 << 30)
76#define PDC_EN (1 << 31)
77#define GART_BASE 0x2C
78#define GART_CACHE_CNTRL 0x2E
79# define GART_CACHE_INVALIDATE (1 << 0)
80#define MC_STATUS 0x90
81#define MCCFG_FB_LOCATION 0x100
82#define MC_FB_START_MASK 0x0000FFFF
83#define MC_FB_START_SHIFT 0
84#define MC_FB_TOP_MASK 0xFFFF0000
85#define MC_FB_TOP_SHIFT 16
86#define MCCFG_AGP_LOCATION 0x101
87#define MC_AGP_START_MASK 0x0000FFFF
88#define MC_AGP_START_SHIFT 0
89#define MC_AGP_TOP_MASK 0xFFFF0000
90#define MC_AGP_TOP_SHIFT 16
91#define MCCFG_AGP_BASE 0x102
92#define MCCFG_AGP_BASE_2 0x103
93#define MC_INIT_MISC_LAT_TIMER 0x104
94#define MC_DISP0R_INIT_LAT_SHIFT 8
95#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
96#define MC_DISP1R_INIT_LAT_SHIFT 12
97#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
98
99#endif
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ffea37b1b3e2..fd8f3ca716ea 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -27,8 +27,9 @@
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include "drmP.h" 29#include "drmP.h"
30#include "radeon_reg.h" 30#include "rv515r.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_share.h"
32 33
33/* rv515 depends on : */ 34/* rv515 depends on : */
34void r100_hdp_reset(struct radeon_device *rdev); 35void r100_hdp_reset(struct radeon_device *rdev);
@@ -99,26 +100,26 @@ int rv515_mc_init(struct radeon_device *rdev)
99 "programming pipes. Bad things might happen.\n"); 100 "programming pipes. Bad things might happen.\n");
100 } 101 }
101 /* Write VRAM size in case we are limiting it */ 102 /* Write VRAM size in case we are limiting it */
102 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); 103 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
103 tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); 104 tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
104 WREG32(0x134, tmp); 105 WREG32(0x134, tmp);
105 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 106 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
106 tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); 107 tmp = REG_SET(MC_FB_TOP, tmp >> 16);
107 tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); 108 tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
108 WREG32_MC(RV515_MC_FB_LOCATION, tmp); 109 WREG32_MC(MC_FB_LOCATION, tmp);
109 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); 110 WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
110 WREG32(0x310, rdev->mc.vram_location); 111 WREG32(0x310, rdev->mc.vram_location);
111 if (rdev->flags & RADEON_IS_AGP) { 112 if (rdev->flags & RADEON_IS_AGP) {
112 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 113 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
113 tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); 114 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
114 tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); 115 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
115 WREG32_MC(RV515_MC_AGP_LOCATION, tmp); 116 WREG32_MC(MC_AGP_LOCATION, tmp);
116 WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); 117 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
117 WREG32_MC(RV515_MC_AGP_BASE_2, 0); 118 WREG32_MC(MC_AGP_BASE_2, 0);
118 } else { 119 } else {
119 WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); 120 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
120 WREG32_MC(RV515_MC_AGP_BASE, 0); 121 WREG32_MC(MC_AGP_BASE, 0);
121 WREG32_MC(RV515_MC_AGP_BASE_2, 0); 122 WREG32_MC(MC_AGP_BASE_2, 0);
122 } 123 }
123 return 0; 124 return 0;
124} 125}
@@ -136,95 +137,67 @@ void rv515_mc_fini(struct radeon_device *rdev)
136 */ 137 */
137void rv515_ring_start(struct radeon_device *rdev) 138void rv515_ring_start(struct radeon_device *rdev)
138{ 139{
139 unsigned gb_tile_config;
140 int r; 140 int r;
141 141
142 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
143 gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
144 switch (rdev->num_gb_pipes) {
145 case 2:
146 gb_tile_config |= R300_PIPE_COUNT_R300;
147 break;
148 case 3:
149 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
150 break;
151 case 4:
152 gb_tile_config |= R300_PIPE_COUNT_R420;
153 break;
154 case 1:
155 default:
156 gb_tile_config |= R300_PIPE_COUNT_RV350;
157 break;
158 }
159
160 r = radeon_ring_lock(rdev, 64); 142 r = radeon_ring_lock(rdev, 64);
161 if (r) { 143 if (r) {
162 return; 144 return;
163 } 145 }
164 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); 146 radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
165 radeon_ring_write(rdev,
166 RADEON_ISYNC_ANY2D_IDLE3D |
167 RADEON_ISYNC_ANY3D_IDLE2D |
168 RADEON_ISYNC_WAIT_IDLEGUI |
169 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
170 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
171 radeon_ring_write(rdev, gb_tile_config);
172 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
173 radeon_ring_write(rdev, 147 radeon_ring_write(rdev,
174 RADEON_WAIT_2D_IDLECLEAN | 148 ISYNC_ANY2D_IDLE3D |
175 RADEON_WAIT_3D_IDLECLEAN); 149 ISYNC_ANY3D_IDLE2D |
150 ISYNC_WAIT_IDLEGUI |
151 ISYNC_CPSCRATCH_IDLEGUI);
152 radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
153 radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
176 radeon_ring_write(rdev, PACKET0(0x170C, 0)); 154 radeon_ring_write(rdev, PACKET0(0x170C, 0));
177 radeon_ring_write(rdev, 1 << 31); 155 radeon_ring_write(rdev, 1 << 31);
178 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); 156 radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
179 radeon_ring_write(rdev, 0); 157 radeon_ring_write(rdev, 0);
180 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); 158 radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
181 radeon_ring_write(rdev, 0); 159 radeon_ring_write(rdev, 0);
182 radeon_ring_write(rdev, PACKET0(0x42C8, 0)); 160 radeon_ring_write(rdev, PACKET0(0x42C8, 0));
183 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); 161 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
184 radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); 162 radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
185 radeon_ring_write(rdev, 0); 163 radeon_ring_write(rdev, 0);
186 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 164 radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
187 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); 165 radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
188 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); 166 radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
189 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); 167 radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
190 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 168 radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
191 radeon_ring_write(rdev, 169 radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
192 RADEON_WAIT_2D_IDLECLEAN | 170 radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
193 RADEON_WAIT_3D_IDLECLEAN);
194 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
195 radeon_ring_write(rdev, 0); 171 radeon_ring_write(rdev, 0);
196 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 172 radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
197 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); 173 radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
198 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); 174 radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
199 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); 175 radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
200 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); 176 radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
201 radeon_ring_write(rdev,
202 ((6 << R300_MS_X0_SHIFT) |
203 (6 << R300_MS_Y0_SHIFT) |
204 (6 << R300_MS_X1_SHIFT) |
205 (6 << R300_MS_Y1_SHIFT) |
206 (6 << R300_MS_X2_SHIFT) |
207 (6 << R300_MS_Y2_SHIFT) |
208 (6 << R300_MSBD0_Y_SHIFT) |
209 (6 << R300_MSBD0_X_SHIFT)));
210 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
211 radeon_ring_write(rdev, 177 radeon_ring_write(rdev,
212 ((6 << R300_MS_X3_SHIFT) | 178 ((6 << MS_X0_SHIFT) |
213 (6 << R300_MS_Y3_SHIFT) | 179 (6 << MS_Y0_SHIFT) |
214 (6 << R300_MS_X4_SHIFT) | 180 (6 << MS_X1_SHIFT) |
215 (6 << R300_MS_Y4_SHIFT) | 181 (6 << MS_Y1_SHIFT) |
216 (6 << R300_MS_X5_SHIFT) | 182 (6 << MS_X2_SHIFT) |
217 (6 << R300_MS_Y5_SHIFT) | 183 (6 << MS_Y2_SHIFT) |
218 (6 << R300_MSBD1_SHIFT))); 184 (6 << MSBD0_Y_SHIFT) |
219 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); 185 (6 << MSBD0_X_SHIFT)));
220 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); 186 radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
221 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
222 radeon_ring_write(rdev, 187 radeon_ring_write(rdev,
223 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); 188 ((6 << MS_X3_SHIFT) |
224 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); 189 (6 << MS_Y3_SHIFT) |
225 radeon_ring_write(rdev, 190 (6 << MS_X4_SHIFT) |
226 R300_GEOMETRY_ROUND_NEAREST | 191 (6 << MS_Y4_SHIFT) |
227 R300_COLOR_ROUND_NEAREST); 192 (6 << MS_X5_SHIFT) |
193 (6 << MS_Y5_SHIFT) |
194 (6 << MSBD1_SHIFT)));
195 radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
196 radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
197 radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
198 radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
199 radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
200 radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
228 radeon_ring_write(rdev, PACKET0(0x20C8, 0)); 201 radeon_ring_write(rdev, PACKET0(0x20C8, 0));
229 radeon_ring_write(rdev, 0); 202 radeon_ring_write(rdev, 0);
230 radeon_ring_unlock_commit(rdev); 203 radeon_ring_unlock_commit(rdev);
@@ -242,8 +215,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
242 215
243 for (i = 0; i < rdev->usec_timeout; i++) { 216 for (i = 0; i < rdev->usec_timeout; i++) {
244 /* read MC_STATUS */ 217 /* read MC_STATUS */
245 tmp = RREG32_MC(RV515_MC_STATUS); 218 tmp = RREG32_MC(MC_STATUS);
246 if (tmp & RV515_MC_STATUS_IDLE) { 219 if (tmp & MC_STATUS_IDLE) {
247 return 0; 220 return 0;
248 } 221 }
249 DRM_UDELAY(1); 222 DRM_UDELAY(1);
@@ -291,33 +264,33 @@ int rv515_ga_reset(struct radeon_device *rdev)
291 reinit_cp = rdev->cp.ready; 264 reinit_cp = rdev->cp.ready;
292 rdev->cp.ready = false; 265 rdev->cp.ready = false;
293 for (i = 0; i < rdev->usec_timeout; i++) { 266 for (i = 0; i < rdev->usec_timeout; i++) {
294 WREG32(RADEON_CP_CSQ_MODE, 0); 267 WREG32(CP_CSQ_MODE, 0);
295 WREG32(RADEON_CP_CSQ_CNTL, 0); 268 WREG32(CP_CSQ_CNTL, 0);
296 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); 269 WREG32(RBBM_SOFT_RESET, 0x32005);
297 (void)RREG32(RADEON_RBBM_SOFT_RESET); 270 (void)RREG32(RBBM_SOFT_RESET);
298 udelay(200); 271 udelay(200);
299 WREG32(RADEON_RBBM_SOFT_RESET, 0); 272 WREG32(RBBM_SOFT_RESET, 0);
300 /* Wait to prevent race in RBBM_STATUS */ 273 /* Wait to prevent race in RBBM_STATUS */
301 mdelay(1); 274 mdelay(1);
302 tmp = RREG32(RADEON_RBBM_STATUS); 275 tmp = RREG32(RBBM_STATUS);
303 if (tmp & ((1 << 20) | (1 << 26))) { 276 if (tmp & ((1 << 20) | (1 << 26))) {
304 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); 277 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
305 /* GA still busy soft reset it */ 278 /* GA still busy soft reset it */
306 WREG32(0x429C, 0x200); 279 WREG32(0x429C, 0x200);
307 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); 280 WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
308 WREG32(0x43E0, 0); 281 WREG32(0x43E0, 0);
309 WREG32(0x43E4, 0); 282 WREG32(0x43E4, 0);
310 WREG32(0x24AC, 0); 283 WREG32(0x24AC, 0);
311 } 284 }
312 /* Wait to prevent race in RBBM_STATUS */ 285 /* Wait to prevent race in RBBM_STATUS */
313 mdelay(1); 286 mdelay(1);
314 tmp = RREG32(RADEON_RBBM_STATUS); 287 tmp = RREG32(RBBM_STATUS);
315 if (!(tmp & ((1 << 20) | (1 << 26)))) { 288 if (!(tmp & ((1 << 20) | (1 << 26)))) {
316 break; 289 break;
317 } 290 }
318 } 291 }
319 for (i = 0; i < rdev->usec_timeout; i++) { 292 for (i = 0; i < rdev->usec_timeout; i++) {
320 tmp = RREG32(RADEON_RBBM_STATUS); 293 tmp = RREG32(RBBM_STATUS);
321 if (!(tmp & ((1 << 20) | (1 << 26)))) { 294 if (!(tmp & ((1 << 20) | (1 << 26)))) {
322 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", 295 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
323 tmp); 296 tmp);
@@ -331,7 +304,7 @@ int rv515_ga_reset(struct radeon_device *rdev)
331 } 304 }
332 DRM_UDELAY(1); 305 DRM_UDELAY(1);
333 } 306 }
334 tmp = RREG32(RADEON_RBBM_STATUS); 307 tmp = RREG32(RBBM_STATUS);
335 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); 308 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
336 return -1; 309 return -1;
337} 310}
@@ -341,7 +314,7 @@ int rv515_gpu_reset(struct radeon_device *rdev)
341 uint32_t status; 314 uint32_t status;
342 315
343 /* reset order likely matter */ 316 /* reset order likely matter */
344 status = RREG32(RADEON_RBBM_STATUS); 317 status = RREG32(RBBM_STATUS);
345 /* reset HDP */ 318 /* reset HDP */
346 r100_hdp_reset(rdev); 319 r100_hdp_reset(rdev);
347 /* reset rb2d */ 320 /* reset rb2d */
@@ -353,12 +326,12 @@ int rv515_gpu_reset(struct radeon_device *rdev)
353 rv515_ga_reset(rdev); 326 rv515_ga_reset(rdev);
354 } 327 }
355 /* reset CP */ 328 /* reset CP */
356 status = RREG32(RADEON_RBBM_STATUS); 329 status = RREG32(RBBM_STATUS);
357 if (status & (1 << 16)) { 330 if (status & (1 << 16)) {
358 r100_cp_reset(rdev); 331 r100_cp_reset(rdev);
359 } 332 }
360 /* Check if GPU is idle */ 333 /* Check if GPU is idle */
361 status = RREG32(RADEON_RBBM_STATUS); 334 status = RREG32(RBBM_STATUS);
362 if (status & (1 << 31)) { 335 if (status & (1 << 31)) {
363 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 336 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
364 return -1; 337 return -1;
@@ -377,8 +350,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
377 350
378 rdev->mc.vram_width = 128; 351 rdev->mc.vram_width = 128;
379 rdev->mc.vram_is_ddr = true; 352 rdev->mc.vram_is_ddr = true;
380 tmp = RREG32_MC(RV515_MC_CNTL); 353 tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
381 tmp &= RV515_MEM_NUM_CHANNELS_MASK;
382 switch (tmp) { 354 switch (tmp) {
383 case 0: 355 case 0:
384 rdev->mc.vram_width = 64; 356 rdev->mc.vram_width = 64;
@@ -394,11 +366,17 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
394 366
395void rv515_vram_info(struct radeon_device *rdev) 367void rv515_vram_info(struct radeon_device *rdev)
396{ 368{
369 fixed20_12 a;
370
397 rv515_vram_get_type(rdev); 371 rv515_vram_get_type(rdev);
398 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
399 372
400 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 373 r100_vram_init_sizes(rdev);
401 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 374 /* FIXME: we should enforce default clock in case GPU is not in
375 * default setup
376 */
377 a.full = rfixed_const(100);
378 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
379 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
402} 380}
403 381
404 382
@@ -409,35 +387,35 @@ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
409{ 387{
410 uint32_t r; 388 uint32_t r;
411 389
412 WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 390 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
413 r = RREG32(R520_MC_IND_DATA); 391 r = RREG32(MC_IND_DATA);
414 WREG32(R520_MC_IND_INDEX, 0); 392 WREG32(MC_IND_INDEX, 0);
415 return r; 393 return r;
416} 394}
417 395
418void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 396void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
419{ 397{
420 WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 398 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
421 WREG32(R520_MC_IND_DATA, (v)); 399 WREG32(MC_IND_DATA, (v));
422 WREG32(R520_MC_IND_INDEX, 0); 400 WREG32(MC_IND_INDEX, 0);
423} 401}
424 402
425uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 403uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
426{ 404{
427 uint32_t r; 405 uint32_t r;
428 406
429 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); 407 WREG32(PCIE_INDEX, ((reg) & 0x7ff));
430 (void)RREG32(RADEON_PCIE_INDEX); 408 (void)RREG32(PCIE_INDEX);
431 r = RREG32(RADEON_PCIE_DATA); 409 r = RREG32(PCIE_DATA);
432 return r; 410 return r;
433} 411}
434 412
435void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 413void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
436{ 414{
437 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); 415 WREG32(PCIE_INDEX, ((reg) & 0x7ff));
438 (void)RREG32(RADEON_PCIE_INDEX); 416 (void)RREG32(PCIE_INDEX);
439 WREG32(RADEON_PCIE_DATA, (v)); 417 WREG32(PCIE_DATA, (v));
440 (void)RREG32(RADEON_PCIE_DATA); 418 (void)RREG32(PCIE_DATA);
441} 419}
442 420
443 421
@@ -452,13 +430,13 @@ static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
452 struct radeon_device *rdev = dev->dev_private; 430 struct radeon_device *rdev = dev->dev_private;
453 uint32_t tmp; 431 uint32_t tmp;
454 432
455 tmp = RREG32(R400_GB_PIPE_SELECT); 433 tmp = RREG32(GB_PIPE_SELECT);
456 seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); 434 seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
457 tmp = RREG32(R500_SU_REG_DEST); 435 tmp = RREG32(SU_REG_DEST);
458 seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); 436 seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
459 tmp = RREG32(R300_GB_TILE_CONFIG); 437 tmp = RREG32(GB_TILE_CONFIG);
460 seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); 438 seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
461 tmp = RREG32(R300_DST_PIPE_CONFIG); 439 tmp = RREG32(DST_PIPE_CONFIG);
462 seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); 440 seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
463 return 0; 441 return 0;
464} 442}
@@ -509,9 +487,9 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
509/* 487/*
510 * Asic initialization 488 * Asic initialization
511 */ 489 */
512static const unsigned r500_reg_safe_bm[159] = { 490static const unsigned r500_reg_safe_bm[219] = {
491 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
513 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 492 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
514 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
515 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 493 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
516 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 494 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
517 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 495 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -549,14 +527,575 @@ static const unsigned r500_reg_safe_bm[159] = {
549 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 527 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
550 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, 528 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
551 0x00000000, 0x00000000, 0x00000000, 0x00000000, 529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
552 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 530 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
531 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
532 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
533 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
534 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
535 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
536 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
537 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
538 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
539 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
540 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
541 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
542 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
543 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
544 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
545 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
553}; 546};
554 547
555
556
557int rv515_init(struct radeon_device *rdev) 548int rv515_init(struct radeon_device *rdev)
558{ 549{
559 rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; 550 rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
560 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); 551 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
561 return 0; 552 return 0;
562} 553}
554
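The table grows from 159 to 219 entries so that the new 0x65xx/0x6Dxx display registers fall inside it. A sketch of how such a safe-register bitmap is presumably indexed by the command-stream checker, assuming one bit per 32-bit register and 128 bytes of register space per table entry (the actual check lives in the r300 CS parser, not in this file):

    #include <stdio.h>

    /* One bit per 32-bit register; each table entry covers 32 registers,
     * i.e. 128 bytes of register space.  219 entries * 128 = 0x6D80, just
     * enough to reach the display registers used by the bandwidth code. */
    static int reg_is_safe(const unsigned *bm, unsigned nentries, unsigned reg)
    {
        unsigned idx = reg >> 7;          /* which table entry */
        unsigned bit = (reg >> 2) & 31;   /* which bit inside it */

        if (idx >= nentries)
            return 0;
        return (bm[idx] >> bit) & 1;
    }

    int main(void)
    {
        unsigned bm[219];
        unsigned i;

        for (i = 0; i < 219; i++)
            bm[i] = 0xFFFFFFFF;           /* pretend everything is allowed */

        printf("0x6D58 -> %d\n", reg_is_safe(bm, 219, 0x6D58));  /* 1 */
        printf("0x7000 -> %d\n", reg_is_safe(bm, 219, 0x7000));  /* 0: past the table */
        return 0;
    }
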
555void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
556{
557
558 WREG32(0x659C, 0x0);
559 WREG32(0x6594, 0x705);
560 WREG32(0x65A4, 0x10001);
561 WREG32(0x65D8, 0x0);
562 WREG32(0x65B0, 0x0);
563 WREG32(0x65C0, 0x0);
564 WREG32(0x65D4, 0x0);
565 WREG32(0x6578, 0x0);
566 WREG32(0x657C, 0x841880A8);
567 WREG32(0x6578, 0x1);
568 WREG32(0x657C, 0x84208680);
569 WREG32(0x6578, 0x2);
570 WREG32(0x657C, 0xBFF880B0);
571 WREG32(0x6578, 0x100);
572 WREG32(0x657C, 0x83D88088);
573 WREG32(0x6578, 0x101);
574 WREG32(0x657C, 0x84608680);
575 WREG32(0x6578, 0x102);
576 WREG32(0x657C, 0xBFF080D0);
577 WREG32(0x6578, 0x200);
578 WREG32(0x657C, 0x83988068);
579 WREG32(0x6578, 0x201);
580 WREG32(0x657C, 0x84A08680);
581 WREG32(0x6578, 0x202);
582 WREG32(0x657C, 0xBFF080F8);
583 WREG32(0x6578, 0x300);
584 WREG32(0x657C, 0x83588058);
585 WREG32(0x6578, 0x301);
586 WREG32(0x657C, 0x84E08660);
587 WREG32(0x6578, 0x302);
588 WREG32(0x657C, 0xBFF88120);
589 WREG32(0x6578, 0x400);
590 WREG32(0x657C, 0x83188040);
591 WREG32(0x6578, 0x401);
592 WREG32(0x657C, 0x85008660);
593 WREG32(0x6578, 0x402);
594 WREG32(0x657C, 0xBFF88150);
595 WREG32(0x6578, 0x500);
596 WREG32(0x657C, 0x82D88030);
597 WREG32(0x6578, 0x501);
598 WREG32(0x657C, 0x85408640);
599 WREG32(0x6578, 0x502);
600 WREG32(0x657C, 0xBFF88180);
601 WREG32(0x6578, 0x600);
602 WREG32(0x657C, 0x82A08018);
603 WREG32(0x6578, 0x601);
604 WREG32(0x657C, 0x85808620);
605 WREG32(0x6578, 0x602);
606 WREG32(0x657C, 0xBFF081B8);
607 WREG32(0x6578, 0x700);
608 WREG32(0x657C, 0x82608010);
609 WREG32(0x6578, 0x701);
610 WREG32(0x657C, 0x85A08600);
611 WREG32(0x6578, 0x702);
612 WREG32(0x657C, 0x800081F0);
613 WREG32(0x6578, 0x800);
614 WREG32(0x657C, 0x8228BFF8);
615 WREG32(0x6578, 0x801);
616 WREG32(0x657C, 0x85E085E0);
617 WREG32(0x6578, 0x802);
618 WREG32(0x657C, 0xBFF88228);
619 WREG32(0x6578, 0x10000);
620 WREG32(0x657C, 0x82A8BF00);
621 WREG32(0x6578, 0x10001);
622 WREG32(0x657C, 0x82A08CC0);
623 WREG32(0x6578, 0x10002);
624 WREG32(0x657C, 0x8008BEF8);
625 WREG32(0x6578, 0x10100);
626 WREG32(0x657C, 0x81F0BF28);
627 WREG32(0x6578, 0x10101);
628 WREG32(0x657C, 0x83608CA0);
629 WREG32(0x6578, 0x10102);
630 WREG32(0x657C, 0x8018BED0);
631 WREG32(0x6578, 0x10200);
632 WREG32(0x657C, 0x8148BF38);
633 WREG32(0x6578, 0x10201);
634 WREG32(0x657C, 0x84408C80);
635 WREG32(0x6578, 0x10202);
636 WREG32(0x657C, 0x8008BEB8);
637 WREG32(0x6578, 0x10300);
638 WREG32(0x657C, 0x80B0BF78);
639 WREG32(0x6578, 0x10301);
640 WREG32(0x657C, 0x85008C20);
641 WREG32(0x6578, 0x10302);
642 WREG32(0x657C, 0x8020BEA0);
643 WREG32(0x6578, 0x10400);
644 WREG32(0x657C, 0x8028BF90);
645 WREG32(0x6578, 0x10401);
646 WREG32(0x657C, 0x85E08BC0);
647 WREG32(0x6578, 0x10402);
648 WREG32(0x657C, 0x8018BE90);
649 WREG32(0x6578, 0x10500);
650 WREG32(0x657C, 0xBFB8BFB0);
651 WREG32(0x6578, 0x10501);
652 WREG32(0x657C, 0x86C08B40);
653 WREG32(0x6578, 0x10502);
654 WREG32(0x657C, 0x8010BE90);
655 WREG32(0x6578, 0x10600);
656 WREG32(0x657C, 0xBF58BFC8);
657 WREG32(0x6578, 0x10601);
658 WREG32(0x657C, 0x87A08AA0);
659 WREG32(0x6578, 0x10602);
660 WREG32(0x657C, 0x8010BE98);
661 WREG32(0x6578, 0x10700);
662 WREG32(0x657C, 0xBF10BFF0);
663 WREG32(0x6578, 0x10701);
664 WREG32(0x657C, 0x886089E0);
665 WREG32(0x6578, 0x10702);
666 WREG32(0x657C, 0x8018BEB0);
667 WREG32(0x6578, 0x10800);
668 WREG32(0x657C, 0xBED8BFE8);
669 WREG32(0x6578, 0x10801);
670 WREG32(0x657C, 0x89408940);
671 WREG32(0x6578, 0x10802);
672 WREG32(0x657C, 0xBFE8BED8);
673 WREG32(0x6578, 0x20000);
674 WREG32(0x657C, 0x80008000);
675 WREG32(0x6578, 0x20001);
676 WREG32(0x657C, 0x90008000);
677 WREG32(0x6578, 0x20002);
678 WREG32(0x657C, 0x80008000);
679 WREG32(0x6578, 0x20003);
680 WREG32(0x657C, 0x80008000);
681 WREG32(0x6578, 0x20100);
682 WREG32(0x657C, 0x80108000);
683 WREG32(0x6578, 0x20101);
684 WREG32(0x657C, 0x8FE0BF70);
685 WREG32(0x6578, 0x20102);
686 WREG32(0x657C, 0xBFE880C0);
687 WREG32(0x6578, 0x20103);
688 WREG32(0x657C, 0x80008000);
689 WREG32(0x6578, 0x20200);
690 WREG32(0x657C, 0x8018BFF8);
691 WREG32(0x6578, 0x20201);
692 WREG32(0x657C, 0x8F80BF08);
693 WREG32(0x6578, 0x20202);
694 WREG32(0x657C, 0xBFD081A0);
695 WREG32(0x6578, 0x20203);
696 WREG32(0x657C, 0xBFF88000);
697 WREG32(0x6578, 0x20300);
698 WREG32(0x657C, 0x80188000);
699 WREG32(0x6578, 0x20301);
700 WREG32(0x657C, 0x8EE0BEC0);
701 WREG32(0x6578, 0x20302);
702 WREG32(0x657C, 0xBFB082A0);
703 WREG32(0x6578, 0x20303);
704 WREG32(0x657C, 0x80008000);
705 WREG32(0x6578, 0x20400);
706 WREG32(0x657C, 0x80188000);
707 WREG32(0x6578, 0x20401);
708 WREG32(0x657C, 0x8E00BEA0);
709 WREG32(0x6578, 0x20402);
710 WREG32(0x657C, 0xBF8883C0);
711 WREG32(0x6578, 0x20403);
712 WREG32(0x657C, 0x80008000);
713 WREG32(0x6578, 0x20500);
714 WREG32(0x657C, 0x80188000);
715 WREG32(0x6578, 0x20501);
716 WREG32(0x657C, 0x8D00BE90);
717 WREG32(0x6578, 0x20502);
718 WREG32(0x657C, 0xBF588500);
719 WREG32(0x6578, 0x20503);
720 WREG32(0x657C, 0x80008008);
721 WREG32(0x6578, 0x20600);
722 WREG32(0x657C, 0x80188000);
723 WREG32(0x6578, 0x20601);
724 WREG32(0x657C, 0x8BC0BE98);
725 WREG32(0x6578, 0x20602);
726 WREG32(0x657C, 0xBF308660);
727 WREG32(0x6578, 0x20603);
728 WREG32(0x657C, 0x80008008);
729 WREG32(0x6578, 0x20700);
730 WREG32(0x657C, 0x80108000);
731 WREG32(0x6578, 0x20701);
732 WREG32(0x657C, 0x8A80BEB0);
733 WREG32(0x6578, 0x20702);
734 WREG32(0x657C, 0xBF0087C0);
735 WREG32(0x6578, 0x20703);
736 WREG32(0x657C, 0x80008008);
737 WREG32(0x6578, 0x20800);
738 WREG32(0x657C, 0x80108000);
739 WREG32(0x6578, 0x20801);
740 WREG32(0x657C, 0x8920BED0);
741 WREG32(0x6578, 0x20802);
742 WREG32(0x657C, 0xBED08920);
743 WREG32(0x6578, 0x20803);
744 WREG32(0x657C, 0x80008010);
745 WREG32(0x6578, 0x30000);
746 WREG32(0x657C, 0x90008000);
747 WREG32(0x6578, 0x30001);
748 WREG32(0x657C, 0x80008000);
749 WREG32(0x6578, 0x30100);
750 WREG32(0x657C, 0x8FE0BF90);
751 WREG32(0x6578, 0x30101);
752 WREG32(0x657C, 0xBFF880A0);
753 WREG32(0x6578, 0x30200);
754 WREG32(0x657C, 0x8F60BF40);
755 WREG32(0x6578, 0x30201);
756 WREG32(0x657C, 0xBFE88180);
757 WREG32(0x6578, 0x30300);
758 WREG32(0x657C, 0x8EC0BF00);
759 WREG32(0x6578, 0x30301);
760 WREG32(0x657C, 0xBFC88280);
761 WREG32(0x6578, 0x30400);
762 WREG32(0x657C, 0x8DE0BEE0);
763 WREG32(0x6578, 0x30401);
764 WREG32(0x657C, 0xBFA083A0);
765 WREG32(0x6578, 0x30500);
766 WREG32(0x657C, 0x8CE0BED0);
767 WREG32(0x6578, 0x30501);
768 WREG32(0x657C, 0xBF7884E0);
769 WREG32(0x6578, 0x30600);
770 WREG32(0x657C, 0x8BA0BED8);
771 WREG32(0x6578, 0x30601);
772 WREG32(0x657C, 0xBF508640);
773 WREG32(0x6578, 0x30700);
774 WREG32(0x657C, 0x8A60BEE8);
775 WREG32(0x6578, 0x30701);
776 WREG32(0x657C, 0xBF2087A0);
777 WREG32(0x6578, 0x30800);
778 WREG32(0x657C, 0x8900BF00);
779 WREG32(0x6578, 0x30801);
780 WREG32(0x657C, 0xBF008900);
781}
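The long run of paired writes above uses 0x6578 as what appears to be an index/select register and 0x657C as the matching data register: each pair picks a table slot and loads a packed value into it. A minimal sketch of that pattern, assuming only the driver's WREG32() macro and the two offsets from the listing (the helper name and the "table entry" reading are illustrative, not taken from the driver):

static void rv515_write_indexed(struct radeon_device *rdev, u32 index, u32 data)
{
	WREG32(0x6578, index);	/* select the table entry */
	WREG32(0x657C, data);	/* load its packed value  */
}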
782
783struct rv515_watermark {
784 u32 lb_request_fifo_depth;
785 fixed20_12 num_line_pair;
786 fixed20_12 estimated_width;
787 fixed20_12 worst_case_latency;
788 fixed20_12 consumption_rate;
789 fixed20_12 active_time;
790 fixed20_12 dbpp;
791 fixed20_12 priority_mark_max;
792 fixed20_12 priority_mark;
793 fixed20_12 sclk;
794};
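The fixed20_12 members above are 20.12 fixed-point values (20 integer bits, 12 fractional bits). A rough model of the helpers as the code below uses them; treat this as an illustration of the arithmetic, not the driver's actual fixed-point header:

typedef union { u32 full; } fixed20_12;

#define rfixed_const(a)  ((u32)((a) << 12))   /* integer -> 20.12, assigned to .full  */
#define rfixed_trunc(a)  ((a).full >> 12)     /* 20.12 -> integer, fraction dropped   */
/* rfixed_mul()/rfixed_div() take two fixed20_12 operands and return the raw
 * 20.12 result as a u32, which the caller assigns back to a .full field. */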
795
796void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
797 struct radeon_crtc *crtc,
798 struct rv515_watermark *wm)
799{
800 struct drm_display_mode *mode = &crtc->base.mode;
801 fixed20_12 a, b, c;
802 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
803 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
804
805 if (!crtc->base.enabled) {
806		/* FIXME: wouldn't it be better to set priority mark to maximum */
807 wm->lb_request_fifo_depth = 4;
808 return;
809 }
810
811 if (crtc->vsc.full > rfixed_const(2))
812 wm->num_line_pair.full = rfixed_const(2);
813 else
814 wm->num_line_pair.full = rfixed_const(1);
815
816 b.full = rfixed_const(mode->crtc_hdisplay);
817 c.full = rfixed_const(256);
818 a.full = rfixed_mul(wm->num_line_pair, b);
819 request_fifo_depth.full = rfixed_div(a, c);
820 if (a.full < rfixed_const(4)) {
821 wm->lb_request_fifo_depth = 4;
822 } else {
823 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
824 }
825
826 /* Determine consumption rate
827 * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
828 * vtaps = number of vertical taps,
829 * vsc = vertical scaling ratio, defined as source/destination
830	 * hsc = horizontal scaling ratio, defined as source/destination
831 */
832 a.full = rfixed_const(mode->clock);
833 b.full = rfixed_const(1000);
834 a.full = rfixed_div(a, b);
835 pclk.full = rfixed_div(b, a);
836 if (crtc->rmx_type != RMX_OFF) {
837 b.full = rfixed_const(2);
838 if (crtc->vsc.full > b.full)
839 b.full = crtc->vsc.full;
840 b.full = rfixed_mul(b, crtc->hsc);
841 c.full = rfixed_const(2);
842 b.full = rfixed_div(b, c);
843 consumption_time.full = rfixed_div(pclk, b);
844 } else {
845 consumption_time.full = pclk.full;
846 }
847 a.full = rfixed_const(1);
848 wm->consumption_rate.full = rfixed_div(a, consumption_time);
849
850
851 /* Determine line time
852	 * LineTime = total time for one line of display
853	 * htotal = total number of horizontal pixels
854 * pclk = pixel clock period(ns)
855 */
856 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
857 line_time.full = rfixed_mul(a, pclk);
858
859 /* Determine active time
860 * ActiveTime = time of active region of display within one line,
861 * hactive = total number of horizontal active pixels
862 * htotal = total number of horizontal pixels
863 */
864 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
865 b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
866 wm->active_time.full = rfixed_mul(line_time, b);
867 wm->active_time.full = rfixed_div(wm->active_time, a);
868
869 /* Determine chunk time
870 * ChunkTime = the time it takes the DCP to send one chunk of data
871 * to the LB which consists of pipeline delay and inter chunk gap
872 * sclk = system clock(Mhz)
873 */
874 a.full = rfixed_const(600 * 1000);
875 chunk_time.full = rfixed_div(a, rdev->pm.sclk);
876 read_delay_latency.full = rfixed_const(1000);
877
878 /* Determine the worst case latency
879 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
880 * WorstCaseLatency = worst case time from urgent to when the MC starts
881 * to return data
882 * READ_DELAY_IDLE_MAX = constant of 1us
883 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
884 * which consists of pipeline delay and inter chunk gap
885 */
886 if (rfixed_trunc(wm->num_line_pair) > 1) {
887 a.full = rfixed_const(3);
888 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
889 wm->worst_case_latency.full += read_delay_latency.full;
890 } else {
891 wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
892 }
893
894 /* Determine the tolerable latency
895 * TolerableLatency = Any given request has only 1 line time
896 * for the data to be returned
897 * LBRequestFifoDepth = Number of chunk requests the LB can
898 * put into the request FIFO for a display
899 * LineTime = total time for one line of display
900 * ChunkTime = the time it takes the DCP to send one chunk
901 * of data to the LB which consists of
902 * pipeline delay and inter chunk gap
903 */
904 if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
905 tolerable_latency.full = line_time.full;
906 } else {
907 tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
908 tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
909 tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
910 tolerable_latency.full = line_time.full - tolerable_latency.full;
911 }
912 /* We assume worst case 32bits (4 bytes) */
913 wm->dbpp.full = rfixed_const(2 * 16);
914
915 /* Determine the maximum priority mark
916 * width = viewport width in pixels
917 */
918 a.full = rfixed_const(16);
919 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
920 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
921
922 /* Determine estimated width */
923 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
924 estimated_width.full = rfixed_div(estimated_width, consumption_time);
925 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
926 wm->priority_mark.full = rfixed_const(10);
927 } else {
928 a.full = rfixed_const(16);
929 wm->priority_mark.full = rfixed_div(estimated_width, a);
930 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
931 }
932}
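To make the formulas in the comments concrete (all numbers illustrative, not from the source): for a hypothetical mode with mode->clock = 108000 (DRM clocks are in kHz, so a 108 MHz pixel clock) and crtc_htotal = 1344, pclk = 1000 / (108000 / 1000) is roughly 9.26 ns per pixel, consumption_rate is roughly 0.108 pixels/ns with scaling off (RMX_OFF), and line_time is roughly 1344 * 9.26, about 12.4 us. The driver carries all of this in the 20.12 fixed-point format sketched above, so its intermediate values are rounded accordingly.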
933
934void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
935{
936 struct drm_display_mode *mode0 = NULL;
937 struct drm_display_mode *mode1 = NULL;
938 struct rv515_watermark wm0;
939 struct rv515_watermark wm1;
940 u32 tmp;
941 fixed20_12 priority_mark02, priority_mark12, fill_rate;
942 fixed20_12 a, b;
943
944 if (rdev->mode_info.crtcs[0]->base.enabled)
945 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
946 if (rdev->mode_info.crtcs[1]->base.enabled)
947 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
948 rs690_line_buffer_adjust(rdev, mode0, mode1);
949
950 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
951 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
952
953 tmp = wm0.lb_request_fifo_depth;
954 tmp |= wm1.lb_request_fifo_depth << 16;
955 WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
956
957 if (mode0 && mode1) {
958 if (rfixed_trunc(wm0.dbpp) > 64)
959 a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
960 else
961 a.full = wm0.num_line_pair.full;
962 if (rfixed_trunc(wm1.dbpp) > 64)
963 b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
964 else
965 b.full = wm1.num_line_pair.full;
966 a.full += b.full;
967 fill_rate.full = rfixed_div(wm0.sclk, a);
968 if (wm0.consumption_rate.full > fill_rate.full) {
969 b.full = wm0.consumption_rate.full - fill_rate.full;
970 b.full = rfixed_mul(b, wm0.active_time);
971 a.full = rfixed_const(16);
972 b.full = rfixed_div(b, a);
973 a.full = rfixed_mul(wm0.worst_case_latency,
974 wm0.consumption_rate);
975 priority_mark02.full = a.full + b.full;
976 } else {
977 a.full = rfixed_mul(wm0.worst_case_latency,
978 wm0.consumption_rate);
979 b.full = rfixed_const(16 * 1000);
980 priority_mark02.full = rfixed_div(a, b);
981 }
982 if (wm1.consumption_rate.full > fill_rate.full) {
983 b.full = wm1.consumption_rate.full - fill_rate.full;
984 b.full = rfixed_mul(b, wm1.active_time);
985 a.full = rfixed_const(16);
986 b.full = rfixed_div(b, a);
987 a.full = rfixed_mul(wm1.worst_case_latency,
988 wm1.consumption_rate);
989 priority_mark12.full = a.full + b.full;
990 } else {
991 a.full = rfixed_mul(wm1.worst_case_latency,
992 wm1.consumption_rate);
993 b.full = rfixed_const(16 * 1000);
994 priority_mark12.full = rfixed_div(a, b);
995 }
996 if (wm0.priority_mark.full > priority_mark02.full)
997 priority_mark02.full = wm0.priority_mark.full;
998 if (rfixed_trunc(priority_mark02) < 0)
999 priority_mark02.full = 0;
1000 if (wm0.priority_mark_max.full > priority_mark02.full)
1001 priority_mark02.full = wm0.priority_mark_max.full;
1002 if (wm1.priority_mark.full > priority_mark12.full)
1003 priority_mark12.full = wm1.priority_mark.full;
1004 if (rfixed_trunc(priority_mark12) < 0)
1005 priority_mark12.full = 0;
1006 if (wm1.priority_mark_max.full > priority_mark12.full)
1007 priority_mark12.full = wm1.priority_mark_max.full;
1008 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1009 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1010 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1011 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1012 } else if (mode0) {
1013 if (rfixed_trunc(wm0.dbpp) > 64)
1014 a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
1015 else
1016 a.full = wm0.num_line_pair.full;
1017 fill_rate.full = rfixed_div(wm0.sclk, a);
1018 if (wm0.consumption_rate.full > fill_rate.full) {
1019 b.full = wm0.consumption_rate.full - fill_rate.full;
1020 b.full = rfixed_mul(b, wm0.active_time);
1021 a.full = rfixed_const(16);
1022 b.full = rfixed_div(b, a);
1023 a.full = rfixed_mul(wm0.worst_case_latency,
1024 wm0.consumption_rate);
1025 priority_mark02.full = a.full + b.full;
1026 } else {
1027 a.full = rfixed_mul(wm0.worst_case_latency,
1028 wm0.consumption_rate);
1029 b.full = rfixed_const(16);
1030 priority_mark02.full = rfixed_div(a, b);
1031 }
1032 if (wm0.priority_mark.full > priority_mark02.full)
1033 priority_mark02.full = wm0.priority_mark.full;
1034 if (rfixed_trunc(priority_mark02) < 0)
1035 priority_mark02.full = 0;
1036 if (wm0.priority_mark_max.full > priority_mark02.full)
1037 priority_mark02.full = wm0.priority_mark_max.full;
1038 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1039 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1040 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1041 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1042 } else {
1043 if (rfixed_trunc(wm1.dbpp) > 64)
1044 a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
1045 else
1046 a.full = wm1.num_line_pair.full;
1047 fill_rate.full = rfixed_div(wm1.sclk, a);
1048 if (wm1.consumption_rate.full > fill_rate.full) {
1049 b.full = wm1.consumption_rate.full - fill_rate.full;
1050 b.full = rfixed_mul(b, wm1.active_time);
1051 a.full = rfixed_const(16);
1052 b.full = rfixed_div(b, a);
1053 a.full = rfixed_mul(wm1.worst_case_latency,
1054 wm1.consumption_rate);
1055 priority_mark12.full = a.full + b.full;
1056 } else {
1057 a.full = rfixed_mul(wm1.worst_case_latency,
1058 wm1.consumption_rate);
1059 b.full = rfixed_const(16 * 1000);
1060 priority_mark12.full = rfixed_div(a, b);
1061 }
1062 if (wm1.priority_mark.full > priority_mark12.full)
1063 priority_mark12.full = wm1.priority_mark.full;
1064 if (rfixed_trunc(priority_mark12) < 0)
1065 priority_mark12.full = 0;
1066 if (wm1.priority_mark_max.full > priority_mark12.full)
1067 priority_mark12.full = wm1.priority_mark_max.full;
1068 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1069 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1070 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1071 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1072 }
1073}
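The two FIFO depths computed at the top of this function are packed into a single register write, matching the field definitions in rv515r.h further below (D1 in bits 3:0, D2 in bits 19:16). A sketch using those masks and shifts; the helper itself is hypothetical:

static u32 pack_lb_max_req(u32 d1_depth, u32 d2_depth)
{
	u32 v = 0;

	v |= (d1_depth << LB_D1_MAX_REQ_OUTSTANDING_SHIFT) & LB_D1_MAX_REQ_OUTSTANDING_MASK;
	v |= (d2_depth << LB_D2_MAX_REQ_OUTSTANDING_SHIFT) & LB_D2_MAX_REQ_OUTSTANDING_MASK;
	return v;
}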
1074
1075void rv515_bandwidth_update(struct radeon_device *rdev)
1076{
1077 uint32_t tmp;
1078 struct drm_display_mode *mode0 = NULL;
1079 struct drm_display_mode *mode1 = NULL;
1080
1081 if (rdev->mode_info.crtcs[0]->base.enabled)
1082 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1083 if (rdev->mode_info.crtcs[1]->base.enabled)
1084 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1085 /*
1086 * Set display0/1 priority up in the memory controller for
1087 * modes if the user specifies HIGH for displaypriority
1088 * option.
1089 */
1090 if (rdev->disp_priority == 2) {
1091 tmp = RREG32_MC(MC_MISC_LAT_TIMER);
1092 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
1093 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
1094 if (mode1)
1095 tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
1096 if (mode0)
1097 tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
1098 WREG32_MC(MC_MISC_LAT_TIMER, tmp);
1099 }
1100 rv515_bandwidth_avivo_update(rdev);
1101}
diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h
new file mode 100644
index 000000000000..f3cf84039906
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515r.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef RV515R_H
29#define RV515R_H
30
31/* RV515 registers */
32#define PCIE_INDEX 0x0030
33#define PCIE_DATA 0x0034
34#define MC_IND_INDEX 0x0070
35#define MC_IND_WR_EN (1 << 24)
36#define MC_IND_DATA 0x0074
37#define RBBM_SOFT_RESET 0x00F0
38#define CONFIG_MEMSIZE 0x00F8
39#define HDP_FB_LOCATION 0x0134
40#define CP_CSQ_CNTL 0x0740
41#define CP_CSQ_MODE 0x0744
42#define CP_CSQ_ADDR 0x07F0
43#define CP_CSQ_DATA 0x07F4
44#define CP_CSQ_STAT 0x07F8
45#define CP_CSQ2_STAT 0x07FC
46#define RBBM_STATUS 0x0E40
47#define DST_PIPE_CONFIG 0x170C
48#define WAIT_UNTIL 0x1720
49#define WAIT_2D_IDLE (1 << 14)
50#define WAIT_3D_IDLE (1 << 15)
51#define WAIT_2D_IDLECLEAN (1 << 16)
52#define WAIT_3D_IDLECLEAN (1 << 17)
53#define ISYNC_CNTL 0x1724
54#define ISYNC_ANY2D_IDLE3D (1 << 0)
55#define ISYNC_ANY3D_IDLE2D (1 << 1)
56#define ISYNC_TRIG2D_IDLE3D (1 << 2)
57#define ISYNC_TRIG3D_IDLE2D (1 << 3)
58#define ISYNC_WAIT_IDLEGUI (1 << 4)
59#define ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
60#define VAP_INDEX_OFFSET 0x208C
61#define VAP_PVS_STATE_FLUSH_REG 0x2284
62#define GB_ENABLE 0x4008
63#define GB_MSPOS0 0x4010
64#define MS_X0_SHIFT 0
65#define MS_Y0_SHIFT 4
66#define MS_X1_SHIFT 8
67#define MS_Y1_SHIFT 12
68#define MS_X2_SHIFT 16
69#define MS_Y2_SHIFT 20
70#define MSBD0_Y_SHIFT 24
71#define MSBD0_X_SHIFT 28
72#define GB_MSPOS1 0x4014
73#define MS_X3_SHIFT 0
74#define MS_Y3_SHIFT 4
75#define MS_X4_SHIFT 8
76#define MS_Y4_SHIFT 12
77#define MS_X5_SHIFT 16
78#define MS_Y5_SHIFT 20
79#define MSBD1_SHIFT 24
80#define GB_TILE_CONFIG 0x4018
81#define ENABLE_TILING (1 << 0)
82#define PIPE_COUNT_MASK 0x0000000E
83#define PIPE_COUNT_SHIFT 1
84#define TILE_SIZE_8 (0 << 4)
85#define TILE_SIZE_16 (1 << 4)
86#define TILE_SIZE_32 (2 << 4)
87#define SUBPIXEL_1_12 (0 << 16)
88#define SUBPIXEL_1_16 (1 << 16)
89#define GB_SELECT 0x401C
90#define GB_AA_CONFIG 0x4020
91#define GB_PIPE_SELECT 0x402C
92#define GA_ENHANCE 0x4274
93#define GA_DEADLOCK_CNTL (1 << 0)
94#define GA_FASTSYNC_CNTL (1 << 1)
95#define GA_POLY_MODE 0x4288
96#define FRONT_PTYPE_POINT (0 << 4)
97#define FRONT_PTYPE_LINE (1 << 4)
98#define FRONT_PTYPE_TRIANGE (2 << 4)
99#define BACK_PTYPE_POINT (0 << 7)
100#define BACK_PTYPE_LINE (1 << 7)
101#define BACK_PTYPE_TRIANGE (2 << 7)
102#define GA_ROUND_MODE 0x428C
103#define GEOMETRY_ROUND_TRUNC (0 << 0)
104#define GEOMETRY_ROUND_NEAREST (1 << 0)
105#define COLOR_ROUND_TRUNC (0 << 2)
106#define COLOR_ROUND_NEAREST (1 << 2)
107#define SU_REG_DEST 0x42C8
108#define RB3D_DSTCACHE_CTLSTAT 0x4E4C
109#define RB3D_DC_FLUSH (2 << 0)
110#define RB3D_DC_FREE (2 << 2)
111#define RB3D_DC_FINISH (1 << 4)
112#define ZB_ZCACHE_CTLSTAT 0x4F18
113#define ZC_FLUSH (1 << 0)
114#define ZC_FREE (1 << 1)
115#define DC_LB_MEMORY_SPLIT 0x6520
116#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
117#define DC_LB_MEMORY_SPLIT_SHIFT 0
118#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
119#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
120#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
121#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
122#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
123#define DC_LB_DISP1_END_ADR_SHIFT 4
124#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
125#define D1MODE_PRIORITY_A_CNT 0x6548
126#define MODE_PRIORITY_MARK_MASK 0x00007FFF
127#define MODE_PRIORITY_OFF (1 << 16)
128#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
129#define MODE_PRIORITY_FORCE_MASK (1 << 24)
130#define D1MODE_PRIORITY_B_CNT 0x654C
131#define LB_MAX_REQ_OUTSTANDING 0x6D58
132#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
133#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
134#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
135#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
136#define D2MODE_PRIORITY_A_CNT 0x6D48
137#define D2MODE_PRIORITY_B_CNT 0x6D4C
138
139/* ix[MC] registers */
140#define MC_FB_LOCATION 0x01
141#define MC_FB_START_MASK 0x0000FFFF
142#define MC_FB_START_SHIFT 0
143#define MC_FB_TOP_MASK 0xFFFF0000
144#define MC_FB_TOP_SHIFT 16
145#define MC_AGP_LOCATION 0x02
146#define MC_AGP_START_MASK 0x0000FFFF
147#define MC_AGP_START_SHIFT 0
148#define MC_AGP_TOP_MASK 0xFFFF0000
149#define MC_AGP_TOP_SHIFT 16
150#define MC_AGP_BASE 0x03
151#define MC_AGP_BASE_2 0x04
152#define MC_CNTL 0x5
153#define MEM_NUM_CHANNELS_MASK 0x00000003
154#define MC_STATUS 0x08
155#define MC_STATUS_IDLE (1 << 4)
156#define MC_MISC_LAT_TIMER 0x09
157#define MC_CPR_INIT_LAT_MASK 0x0000000F
158#define MC_VF_INIT_LAT_MASK 0x000000F0
159#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
160#define MC_DISP0R_INIT_LAT_SHIFT 8
161#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
162#define MC_DISP1R_INIT_LAT_SHIFT 12
163#define MC_FIXED_INIT_LAT_MASK 0x000F0000
164#define MC_E2R_INIT_LAT_MASK 0x00F00000
165#define SAME_PAGE_PRIO_MASK 0x0F000000
166#define MC_GLOBW_INIT_LAT_MASK 0xF0000000
167
168
169#endif
170
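The ix[MC] registers above live in an indirect space: the index goes through MC_IND_INDEX (with MC_IND_WR_EN set for writes) and the payload moves through MC_IND_DATA. This is the mechanism behind the RREG32_MC()/WREG32_MC() accessors used in rv515.c; the sketch below shows the idea only, and the driver's real accessors may mask the index or clear it again afterwards:

static u32 rv515_mc_read(struct radeon_device *rdev, u32 reg)
{
	WREG32(MC_IND_INDEX, reg);
	return RREG32(MC_IND_DATA);
}

static void rv515_mc_write(struct radeon_device *rdev, u32 reg, u32 val)
{
	WREG32(MC_IND_INDEX, MC_IND_WR_EN | reg);
	WREG32(MC_IND_DATA, val);
}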
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index da50cc51ede3..21d8ffd57308 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -67,7 +67,7 @@ int rv770_mc_init(struct radeon_device *rdev)
67 "programming pipes. Bad things might happen.\n"); 67 "programming pipes. Bad things might happen.\n");
68 } 68 }
69 69
70 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 70 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
71 tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); 71 tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
72 tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); 72 tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R700_MC_VM_FB_LOCATION, tmp); 73 WREG32(R700_MC_VM_FB_LOCATION, tmp);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c1c407f7cca3..c2b0d710d10f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -43,7 +43,6 @@
43#define TTM_BO_HASH_ORDER 13 43#define TTM_BO_HASH_ORDER 13
44 44
45static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); 45static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
47static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); 46static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48 47
49static inline uint32_t ttm_bo_type_flags(unsigned type) 48static inline uint32_t ttm_bo_type_flags(unsigned type)
@@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
224 TTM_ASSERT_LOCKED(&bo->mutex); 223 TTM_ASSERT_LOCKED(&bo->mutex);
225 bo->ttm = NULL; 224 bo->ttm = NULL;
226 225
226 if (bdev->need_dma32)
227 page_flags |= TTM_PAGE_FLAG_DMA32;
228
227 switch (bo->type) { 229 switch (bo->type) {
228 case ttm_bo_type_device: 230 case ttm_bo_type_device:
229 if (zero_alloc) 231 if (zero_alloc)
@@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
304 306
305 } 307 }
306 308
309 if (bdev->driver->move_notify)
310 bdev->driver->move_notify(bo, mem);
311
307 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 312 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
308 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 313 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
309 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); 314 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
@@ -655,31 +660,52 @@ retry_pre_get:
655 return 0; 660 return 0;
656} 661}
657 662
663static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
664 uint32_t cur_placement,
665 uint32_t proposed_placement)
666{
667 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
668 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
669
670 /**
671 * Keep current caching if possible.
672 */
673
674 if ((cur_placement & caching) != 0)
675 result |= (cur_placement & caching);
676 else if ((man->default_caching & caching) != 0)
677 result |= man->default_caching;
678 else if ((TTM_PL_FLAG_CACHED & caching) != 0)
679 result |= TTM_PL_FLAG_CACHED;
680 else if ((TTM_PL_FLAG_WC & caching) != 0)
681 result |= TTM_PL_FLAG_WC;
682 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
683 result |= TTM_PL_FLAG_UNCACHED;
684
685 return result;
686}
687
688
658static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 689static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
659 bool disallow_fixed, 690 bool disallow_fixed,
660 uint32_t mem_type, 691 uint32_t mem_type,
661 uint32_t mask, uint32_t *res_mask) 692 uint32_t proposed_placement,
693 uint32_t *masked_placement)
662{ 694{
663 uint32_t cur_flags = ttm_bo_type_flags(mem_type); 695 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
664 696
665 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) 697 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
666 return false; 698 return false;
667 699
668 if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) 700 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
669 return false; 701 return false;
670 702
671 if ((mask & man->available_caching) == 0) 703 if ((proposed_placement & man->available_caching) == 0)
672 return false; 704 return false;
673 if (mask & man->default_caching)
674 cur_flags |= man->default_caching;
675 else if (mask & TTM_PL_FLAG_CACHED)
676 cur_flags |= TTM_PL_FLAG_CACHED;
677 else if (mask & TTM_PL_FLAG_WC)
678 cur_flags |= TTM_PL_FLAG_WC;
679 else
680 cur_flags |= TTM_PL_FLAG_UNCACHED;
681 705
682 *res_mask = cur_flags; 706 cur_flags |= (proposed_placement & man->available_caching);
707
708 *masked_placement = cur_flags;
683 return true; 709 return true;
684} 710}
685 711
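Worked example for the new ttm_bo_select_caching() above (values assumed purely for illustration): if the proposed placement allows WC | UNCACHED, the buffer currently sits in a CACHED mapping, and the manager's default_caching is WC, then the current caching does not intersect the proposal, so the default wins and the result carries TTM_PL_FLAG_WC; only when neither the current nor the default caching matches does the CACHED, then WC, then UNCACHED fallback chain apply.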
@@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
723 if (!type_ok) 749 if (!type_ok)
724 continue; 750 continue;
725 751
752 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
753 cur_flags);
754
726 if (mem_type == TTM_PL_SYSTEM) 755 if (mem_type == TTM_PL_SYSTEM)
727 break; 756 break;
728 757
@@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
779 proposed_placement, &cur_flags)) 808 proposed_placement, &cur_flags))
780 continue; 809 continue;
781 810
811 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
812 cur_flags);
813
782 ret = ttm_bo_mem_force_space(bdev, mem, mem_type, 814 ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
783 interruptible, no_wait); 815 interruptible, no_wait);
784 816
@@ -1150,13 +1182,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1150 1182
1151int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1183int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1152{ 1184{
1153 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 1185 struct ttm_mem_type_manager *man;
1154 int ret = -EINVAL; 1186 int ret = -EINVAL;
1155 1187
1156 if (mem_type >= TTM_NUM_MEM_TYPES) { 1188 if (mem_type >= TTM_NUM_MEM_TYPES) {
1157 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); 1189 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1158 return ret; 1190 return ret;
1159 } 1191 }
1192 man = &bdev->man[mem_type];
1160 1193
1161 if (!man->has_type) { 1194 if (!man->has_type) {
1162 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " 1195 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
@@ -1305,7 +1338,8 @@ EXPORT_SYMBOL(ttm_bo_device_release);
1305 1338
1306int ttm_bo_device_init(struct ttm_bo_device *bdev, 1339int ttm_bo_device_init(struct ttm_bo_device *bdev,
1307 struct ttm_mem_global *mem_glob, 1340 struct ttm_mem_global *mem_glob,
1308 struct ttm_bo_driver *driver, uint64_t file_page_offset) 1341 struct ttm_bo_driver *driver, uint64_t file_page_offset,
1342 bool need_dma32)
1309{ 1343{
1310 int ret = -EINVAL; 1344 int ret = -EINVAL;
1311 1345
@@ -1342,6 +1376,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1342 INIT_LIST_HEAD(&bdev->ddestroy); 1376 INIT_LIST_HEAD(&bdev->ddestroy);
1343 INIT_LIST_HEAD(&bdev->swap_lru); 1377 INIT_LIST_HEAD(&bdev->swap_lru);
1344 bdev->dev_mapping = NULL; 1378 bdev->dev_mapping = NULL;
1379 bdev->need_dma32 = need_dma32;
1345 ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); 1380 ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
1346 ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); 1381 ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
1347 if (unlikely(ret != 0)) { 1382 if (unlikely(ret != 0)) {
@@ -1419,6 +1454,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1419 1454
1420 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); 1455 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1421} 1456}
1457EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1422 1458
1423static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) 1459static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1424{ 1460{
@@ -1540,6 +1576,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1540 driver->sync_obj_unref(&sync_obj); 1576 driver->sync_obj_unref(&sync_obj);
1541 driver->sync_obj_unref(&tmp_obj); 1577 driver->sync_obj_unref(&tmp_obj);
1542 spin_lock(&bo->lock); 1578 spin_lock(&bo->lock);
1579 } else {
1580 spin_unlock(&bo->lock);
1581 driver->sync_obj_unref(&sync_obj);
1582 spin_lock(&bo->lock);
1543 } 1583 }
1544 } 1584 }
1545 return 0; 1585 return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 517c84559633..ad4ada07c6cf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -34,7 +34,6 @@
34#include <linux/highmem.h> 34#include <linux/highmem.h>
35#include <linux/wait.h> 35#include <linux/wait.h>
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/version.h>
38#include <linux/module.h> 37#include <linux/module.h>
39 38
40void ttm_bo_free_old_node(struct ttm_buffer_object *bo) 39void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
@@ -137,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
137} 136}
138 137
139static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, 138static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
140 unsigned long page) 139 unsigned long page,
140 pgprot_t prot)
141{ 141{
142 struct page *d = ttm_tt_get_page(ttm, page); 142 struct page *d = ttm_tt_get_page(ttm, page);
143 void *dst; 143 void *dst;
@@ -146,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
146 return -ENOMEM; 146 return -ENOMEM;
147 147
148 src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); 148 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
149 dst = kmap(d); 149
150#ifdef CONFIG_X86
151 dst = kmap_atomic_prot(d, KM_USER0, prot);
152#else
153 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
154 dst = vmap(&d, 1, 0, prot);
155 else
156 dst = kmap(d);
157#endif
150 if (!dst) 158 if (!dst)
151 return -ENOMEM; 159 return -ENOMEM;
152 160
153 memcpy_fromio(dst, src, PAGE_SIZE); 161 memcpy_fromio(dst, src, PAGE_SIZE);
154 kunmap(d); 162
163#ifdef CONFIG_X86
164 kunmap_atomic(dst, KM_USER0);
165#else
166 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
167 vunmap(dst);
168 else
169 kunmap(d);
170#endif
171
155 return 0; 172 return 0;
156} 173}
157 174
158static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, 175static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
159 unsigned long page) 176 unsigned long page,
177 pgprot_t prot)
160{ 178{
161 struct page *s = ttm_tt_get_page(ttm, page); 179 struct page *s = ttm_tt_get_page(ttm, page);
162 void *src; 180 void *src;
@@ -165,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
165 return -ENOMEM; 183 return -ENOMEM;
166 184
167 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); 185 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
168 src = kmap(s); 186#ifdef CONFIG_X86
187 src = kmap_atomic_prot(s, KM_USER0, prot);
188#else
189 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
190 src = vmap(&s, 1, 0, prot);
191 else
192 src = kmap(s);
193#endif
169 if (!src) 194 if (!src)
170 return -ENOMEM; 195 return -ENOMEM;
171 196
172 memcpy_toio(dst, src, PAGE_SIZE); 197 memcpy_toio(dst, src, PAGE_SIZE);
173 kunmap(s); 198
199#ifdef CONFIG_X86
200 kunmap_atomic(src, KM_USER0);
201#else
202 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
203 vunmap(src);
204 else
205 kunmap(s);
206#endif
207
174 return 0; 208 return 0;
175} 209}
176 210
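The changes above thread a pgprot_t through the copy helpers because kmap() only produces a cacheable PAGE_KERNEL mapping; when the transfer must honour a different protection (for example write-combined memory), the non-x86 path falls back to a one-page vmap() with the explicit prot, while x86 can use kmap_atomic_prot() directly. A minimal sketch of that choice, mirroring the patch (the function name is illustrative):

static void *map_one_page(struct page *p, pgprot_t prot)
{
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		return vmap(&p, 1, 0, prot);	/* honour the requested protection */
	return kmap(p);				/* plain kernel mapping is enough   */
}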
@@ -215,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
215 249
216 for (i = 0; i < new_mem->num_pages; ++i) { 250 for (i = 0; i < new_mem->num_pages; ++i) {
217 page = i * dir + add; 251 page = i * dir + add;
218 if (old_iomap == NULL) 252 if (old_iomap == NULL) {
219 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); 253 pgprot_t prot = ttm_io_prot(old_mem->placement,
220 else if (new_iomap == NULL) 254 PAGE_KERNEL);
221 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); 255 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
222 else 256 prot);
257 } else if (new_iomap == NULL) {
258 pgprot_t prot = ttm_io_prot(new_mem->placement,
259 PAGE_KERNEL);
260 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
261 prot);
262 } else
223 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 263 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
224 if (ret) 264 if (ret)
225 goto out1; 265 goto out1;
@@ -510,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
510 if (evict) { 550 if (evict) {
511 ret = ttm_bo_wait(bo, false, false, false); 551 ret = ttm_bo_wait(bo, false, false, false);
512 spin_unlock(&bo->lock); 552 spin_unlock(&bo->lock);
513 driver->sync_obj_unref(&bo->sync_obj); 553 if (tmp_obj)
514 554 driver->sync_obj_unref(&tmp_obj);
515 if (ret) 555 if (ret)
516 return ret; 556 return ret;
517 557
@@ -533,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
533 573
534 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 574 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
535 spin_unlock(&bo->lock); 575 spin_unlock(&bo->lock);
576 if (tmp_obj)
577 driver->sync_obj_unref(&tmp_obj);
536 578
537 ret = ttm_buffer_object_transfer(bo, &ghost_obj); 579 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
538 if (ret) 580 if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 27b146c54fbc..33de7637c0c6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -32,7 +32,6 @@
32#include <ttm/ttm_bo_driver.h> 32#include <ttm/ttm_bo_driver.h>
33#include <ttm/ttm_placement.h> 33#include <ttm/ttm_placement.h>
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/version.h>
36#include <linux/rbtree.h> 35#include <linux/rbtree.h>
37#include <linux/module.h> 36#include <linux/module.h>
38#include <linux/uaccess.h> 37#include <linux/uaccess.h>
@@ -102,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
102 return VM_FAULT_NOPAGE; 101 return VM_FAULT_NOPAGE;
103 } 102 }
104 103
104 if (bdev->driver->fault_reserve_notify)
105 bdev->driver->fault_reserve_notify(bo);
106
105 /* 107 /*
106 * Wait for buffer data in transit, due to a pipelined 108 * Wait for buffer data in transit, due to a pipelined
107 * move. 109 * move.
@@ -328,7 +330,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
328 goto out_unref; 330 goto out_unref;
329 331
330 kmap_offset = dev_offset - bo->vm_node->start; 332 kmap_offset = dev_offset - bo->vm_node->start;
331 if (unlikely(kmap_offset) >= bo->num_pages) { 333 if (unlikely(kmap_offset >= bo->num_pages)) {
332 ret = -EFBIG; 334 ret = -EFBIG;
333 goto out_unref; 335 goto out_unref;
334 } 336 }
@@ -402,7 +404,7 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
402 bool dummy; 404 bool dummy;
403 405
404 kmap_offset = (*f_pos >> PAGE_SHIFT); 406 kmap_offset = (*f_pos >> PAGE_SHIFT);
405 if (unlikely(kmap_offset) >= bo->num_pages) 407 if (unlikely(kmap_offset >= bo->num_pages))
406 return -EFBIG; 408 return -EFBIG;
407 409
408 page_offset = *f_pos & ~PAGE_MASK; 410 page_offset = *f_pos & ~PAGE_MASK;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0331fa74cd3f..b8b6c4a5f983 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30 30
31#include <linux/version.h>
32#include <linux/vmalloc.h> 31#include <linux/vmalloc.h>
33#include <linux/sched.h> 32#include <linux/sched.h>
34#include <linux/highmem.h> 33#include <linux/highmem.h>
@@ -87,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
87 unsigned long i; 86 unsigned long i;
88 87
89 for (i = 0; i < num_pages; ++i) { 88 for (i = 0; i < num_pages; ++i) {
90 if (pages[i]) { 89 struct page *page = pages[i];
91 unsigned long start = (unsigned long)page_address(pages[i]); 90 void *page_virtual;
92 flush_dcache_range(start, start + PAGE_SIZE); 91
93 } 92 if (unlikely(page == NULL))
93 continue;
94
95 page_virtual = kmap_atomic(page, KM_USER0);
96 flush_dcache_range((unsigned long) page_virtual,
97 (unsigned long) page_virtual + PAGE_SIZE);
98 kunmap_atomic(page_virtual, KM_USER0);
94 } 99 }
95#else 100#else
96 if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) 101 if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
@@ -132,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
132 137
133static struct page *ttm_tt_alloc_page(unsigned page_flags) 138static struct page *ttm_tt_alloc_page(unsigned page_flags)
134{ 139{
140 gfp_t gfp_flags = GFP_USER;
141
135 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) 142 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
136 return alloc_page(GFP_HIGHUSER | __GFP_ZERO); 143 gfp_flags |= __GFP_ZERO;
144
145 if (page_flags & TTM_PAGE_FLAG_DMA32)
146 gfp_flags |= __GFP_DMA32;
147 else
148 gfp_flags |= __GFP_HIGHMEM;
137 149
138 return alloc_page(GFP_HIGHUSER); 150 return alloc_page(gfp_flags);
139} 151}
140 152
141static void ttm_tt_free_user_pages(struct ttm_tt *ttm) 153static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index c248c1d37268..5935b8842e86 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -183,7 +183,7 @@ int via_enable_vblank(struct drm_device *dev, int crtc)
183 } 183 }
184 184
185 status = VIA_READ(VIA_REG_INTERRUPT); 185 status = VIA_READ(VIA_REG_INTERRUPT);
186 VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); 186 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
187 187
188 VIA_WRITE8(0x83d4, 0x11); 188 VIA_WRITE8(0x83d4, 0x11);
189 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 189 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
@@ -194,6 +194,10 @@ int via_enable_vblank(struct drm_device *dev, int crtc)
194void via_disable_vblank(struct drm_device *dev, int crtc) 194void via_disable_vblank(struct drm_device *dev, int crtc)
195{ 195{
196 drm_via_private_t *dev_priv = dev->dev_private; 196 drm_via_private_t *dev_priv = dev->dev_private;
197 u32 status;
198
199 status = VIA_READ(VIA_REG_INTERRUPT);
200 VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
197 201
198 VIA_WRITE8(0x83d4, 0x11); 202 VIA_WRITE8(0x83d4, 0x11);
199 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); 203 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
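The one-character fix above matters because VIA_REG_INTERRUPT is handled read-modify-write here: masking the read value with '&' writes back only the vblank bit (and cannot set it if it was clear), whereas OR-ing VIA_IRQ_VBLANK_ENABLE in preserves the other enable bits; the new via_disable_vblank() counterpart clears only that bit with '& ~VIA_IRQ_VBLANK_ENABLE'.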
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f2c21d5d24e8..5eb10c2ce665 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1075,14 +1075,16 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
1075 */ 1075 */
1076int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt) 1076int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
1077{ 1077{
1078 struct hid_report_enum *report_enum = hid->report_enum + type; 1078 struct hid_report_enum *report_enum;
1079 struct hid_driver *hdrv = hid->driver; 1079 struct hid_driver *hdrv;
1080 struct hid_report *report; 1080 struct hid_report *report;
1081 unsigned int i; 1081 unsigned int i;
1082 int ret; 1082 int ret;
1083 1083
1084 if (!hid || !hid->driver) 1084 if (!hid || !hid->driver)
1085 return -ENODEV; 1085 return -ENODEV;
1086 report_enum = hid->report_enum + type;
1087 hdrv = hid->driver;
1086 1088
1087 if (!size) { 1089 if (!size) {
1088 dbg_hid("empty report\n"); 1090 dbg_hid("empty report\n");
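The reordering above defers the report_enum and hdrv assignments until after the !hid || !hid->driver check, so a NULL device, or one whose driver has already been detached, is no longer dereferenced in the declaration initializers before the check can reject it.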
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76c4bbe9dccb..3c1fcb7640ab 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -22,7 +22,6 @@
22#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/smp_lock.h>
26#include <linux/spinlock.h> 25#include <linux/spinlock.h>
27#include <asm/unaligned.h> 26#include <asm/unaligned.h>
28#include <asm/byteorder.h> 27#include <asm/byteorder.h>
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 9e9421525fb9..215b2addddbb 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -527,8 +527,10 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
527 goto goodreturn; 527 goto goodreturn;
528 528
529 case HIDIOCGCOLLECTIONINDEX: 529 case HIDIOCGCOLLECTIONINDEX:
530 i = field->usage[uref->usage_index].collection_index;
531 unlock_kernel();
530 kfree(uref_multi); 532 kfree(uref_multi);
531 return field->usage[uref->usage_index].collection_index; 533 return i;
532 case HIDIOCGUSAGES: 534 case HIDIOCGUSAGES:
533 for (i = 0; i < uref_multi->num_values; i++) 535 for (i = 0; i < uref_multi->num_values; i++)
534 uref_multi->values[i] = 536 uref_multi->values[i] =
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index ad2b3431b725..7d3f15d32fdf 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -357,7 +357,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
357 { "AUX5 Fan", 39, 2, 60, 1, 0 }, 357 { "AUX5 Fan", 39, 2, 60, 1, 0 },
358 { NULL, 0, 0, 0, 0, 0 } } 358 { NULL, 0, 0, 0, 0, 0 } }
359 }, 359 },
360 { 0x0014, NULL /* Abit AB9 Pro, need DMI string */, { 360 { 0x0014, "AB9", /* + AB9 Pro */ {
361 { "CPU Core", 0, 0, 10, 1, 0 }, 361 { "CPU Core", 0, 0, 10, 1, 0 },
362 { "DDR", 1, 0, 10, 1, 0 }, 362 { "DDR", 1, 0, 10, 1, 0 },
363 { "DDR VTT", 2, 0, 10, 1, 0 }, 363 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -455,7 +455,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
455 { "AUX3 FAN", 37, 2, 60, 1, 0 }, 455 { "AUX3 FAN", 37, 2, 60, 1, 0 },
456 { NULL, 0, 0, 0, 0, 0 } } 456 { NULL, 0, 0, 0, 0, 0 } }
457 }, 457 },
458 { 0x0018, NULL /* Unknown, need DMI string */, { 458 { 0x0018, "AB9 QuadGT", {
459 { "CPU Core", 0, 0, 10, 1, 0 }, 459 { "CPU Core", 0, 0, 10, 1, 0 },
460 { "DDR2", 1, 0, 20, 1, 0 }, 460 { "DDR2", 1, 0, 20, 1, 0 },
461 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 461 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -564,7 +564,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
564 { "AUX3 Fan", 36, 2, 60, 1, 0 }, 564 { "AUX3 Fan", 36, 2, 60, 1, 0 },
565 { NULL, 0, 0, 0, 0, 0 } } 565 { NULL, 0, 0, 0, 0, 0 } }
566 }, 566 },
567 { 0x001C, NULL /* Unknown, need DMI string */, { 567 { 0x001C, "IX38 QuadGT", {
568 { "CPU Core", 0, 0, 10, 1, 0 }, 568 { "CPU Core", 0, 0, 10, 1, 0 },
569 { "DDR2", 1, 0, 20, 1, 0 }, 569 { "DDR2", 1, 0, 20, 1, 0 },
570 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 570 { "DDR2 VTT", 2, 0, 10, 1, 0 },
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index bff0103610c1..fe4fa29c9219 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -593,7 +593,11 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj)
593 sensor->data = data; 593 sensor->data = data;
594 sensor->id = flags->integer.value; 594 sensor->id = flags->integer.value;
595 sensor->limit1 = limit1->integer.value; 595 sensor->limit1 = limit1->integer.value;
596 sensor->limit2 = limit2->integer.value; 596 if (data->old_interface)
597 sensor->limit2 = limit2->integer.value;
598 else
599 /* The upper limit is expressed as delta from lower limit */
600 sensor->limit2 = sensor->limit1 + limit2->integer.value;
597 601
598 snprintf(sensor->input_attr_name, ATTR_NAME_SIZE, 602 snprintf(sensor->input_attr_name, ATTR_NAME_SIZE,
599 "%s%d_input", base_name, start + *num); 603 "%s%d_input", base_name, start + *num);
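With the new interface the second ACPI value is a delta rather than an absolute limit, so the driver now stores limit1 plus the delta: with illustrative numbers, limit1 = 3000 and a reported value of 300 yield limit2 = 3300, where the old interface would have stored 300 directly.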
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 86142a858238..58f66be61b1f 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -418,6 +418,7 @@ static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
418 data->count = 3; 418 data->count = 3;
419 break; 419 break;
420 default: 420 default:
421 mutex_unlock(&data->update_lock);
421 dev_err(&client->dev, 422 dev_err(&client->dev,
422 "illegal value for fan divider (%d)\n", div); 423 "illegal value for fan divider (%d)\n", div);
423 return -EINVAL; 424 return -EINVAL;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 56cd6004da36..6290a259456e 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -257,7 +257,7 @@ static inline int sht15_update_single_val(struct sht15_data *data,
257 (data->flag == SHT15_READING_NOTHING), 257 (data->flag == SHT15_READING_NOTHING),
258 msecs_to_jiffies(timeout_msecs)); 258 msecs_to_jiffies(timeout_msecs));
259 if (ret == 0) {/* timeout occurred */ 259 if (ret == 0) {/* timeout occurred */
260 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));; 260 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
261 sht15_connection_reset(data); 261 sht15_connection_reset(data);
262 return -ETIME; 262 return -ETIME;
263 } 263 }
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index a92dbb97ee99..ba75bfcf14ce 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -86,6 +86,7 @@ superio_exit(void)
86#define SUPERIO_REG_ACT 0x30 86#define SUPERIO_REG_ACT 0x30
87#define SUPERIO_REG_BASE 0x60 87#define SUPERIO_REG_BASE 0x60
88#define SUPERIO_REG_DEVID 0x20 88#define SUPERIO_REG_DEVID 0x20
89#define SUPERIO_REG_DEVREV 0x21
89 90
90/* Logical device registers */ 91/* Logical device registers */
91 92
@@ -429,6 +430,9 @@ static int __init smsc47m1_find(unsigned short *addr,
429 * The LPC47M292 (device id 0x6B) is somewhat compatible, but it 430 * The LPC47M292 (device id 0x6B) is somewhat compatible, but it
430 * supports a 3rd fan, and the pin configuration registers are 431 * supports a 3rd fan, and the pin configuration registers are
431 * unfortunately different. 432 * unfortunately different.
433 * The LPC47M233 has the same device id (0x6B) but is not compatible.
434 * We check the high bit of the device revision register to
435 * differentiate them.
432 */ 436 */
433 switch (val) { 437 switch (val) {
434 case 0x51: 438 case 0x51:
@@ -448,6 +452,13 @@ static int __init smsc47m1_find(unsigned short *addr,
448 sio_data->type = smsc47m1; 452 sio_data->type = smsc47m1;
449 break; 453 break;
450 case 0x6B: 454 case 0x6B:
455 if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) {
456 pr_debug(DRVNAME ": "
457 "Found SMSC LPC47M233, unsupported\n");
458 superio_exit();
459 return -ENODEV;
460 }
461
451 pr_info(DRVNAME ": Found SMSC LPC47M292\n"); 462 pr_info(DRVNAME ": Found SMSC LPC47M292\n");
452 sio_data->type = smsc47m2; 463 sio_data->type = smsc47m2;
453 break; 464 break;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index aa87b6a3bbef..8206442fbabd 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -328,6 +328,7 @@ config I2C_DAVINCI
328 328
329config I2C_DESIGNWARE 329config I2C_DESIGNWARE
330 tristate "Synopsys DesignWare" 330 tristate "Synopsys DesignWare"
331 depends on HAVE_CLK
331 help 332 help
332 If you say yes to this option, support will be included for the 333 If you say yes to this option, support will be included for the
333 Synopsys DesignWare I2C adapter. Only master mode is supported. 334 Synopsys DesignWare I2C adapter. Only master mode is supported.
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 3fae3a91ce5b..c89687a10835 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -187,6 +187,11 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
187 davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh); 187 davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh);
188 davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl); 188 davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl);
189 189
190	/* Respond at reserved "SMBus Host" slave address (and zero);
191 * we seem to have no option to not respond...
192 */
193 davinci_i2c_write_reg(dev, DAVINCI_I2C_OAR_REG, 0x08);
194
190 dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk); 195 dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk);
191 dev_dbg(dev->dev, "PSC = %d\n", 196 dev_dbg(dev->dev, "PSC = %d\n",
192 davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG)); 197 davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG));
@@ -387,7 +392,7 @@ static void terminate_write(struct davinci_i2c_dev *dev)
387 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); 392 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
388 393
389 if (!dev->terminate) 394 if (!dev->terminate)
390 dev_err(dev->dev, "TDR IRQ while no data to send\n"); 395 dev_dbg(dev->dev, "TDR IRQ while no data to send\n");
391} 396}
392 397
393/* 398/*
@@ -473,9 +478,14 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
473 break; 478 break;
474 479
475 case DAVINCI_I2C_IVR_AAS: 480 case DAVINCI_I2C_IVR_AAS:
476 dev_warn(dev->dev, "Address as slave interrupt\n"); 481 dev_dbg(dev->dev, "Address as slave interrupt\n");
477 }/* switch */ 482 break;
478 }/* while */ 483
484 default:
485 dev_warn(dev->dev, "Unrecognized irq stat %d\n", stat);
486 break;
487 }
488 }
479 489
480 return count ? IRQ_HANDLED : IRQ_NONE; 490 return count ? IRQ_HANDLED : IRQ_NONE;
481} 491}
@@ -505,7 +515,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
505 return -ENODEV; 515 return -ENODEV;
506 } 516 }
507 517
508 ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1, 518 ioarea = request_mem_region(mem->start, resource_size(mem),
509 pdev->name); 519 pdev->name);
510 if (!ioarea) { 520 if (!ioarea) {
511 dev_err(&pdev->dev, "I2C region already claimed\n"); 521 dev_err(&pdev->dev, "I2C region already claimed\n");
@@ -523,7 +533,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
523 dev->irq = irq->start; 533 dev->irq = irq->start;
524 platform_set_drvdata(pdev, dev); 534 platform_set_drvdata(pdev, dev);
525 535
526 dev->clk = clk_get(&pdev->dev, "I2CCLK"); 536 dev->clk = clk_get(&pdev->dev, NULL);
527 if (IS_ERR(dev->clk)) { 537 if (IS_ERR(dev->clk)) {
528 r = -ENODEV; 538 r = -ENODEV;
529 goto err_free_mem; 539 goto err_free_mem;
@@ -568,7 +578,7 @@ err_free_mem:
568 put_device(&pdev->dev); 578 put_device(&pdev->dev);
569 kfree(dev); 579 kfree(dev);
570err_release_region: 580err_release_region:
571 release_mem_region(mem->start, (mem->end - mem->start) + 1); 581 release_mem_region(mem->start, resource_size(mem));
572 582
573 return r; 583 return r;
574} 584}
@@ -591,7 +601,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
591 kfree(dev); 601 kfree(dev);
592 602
593 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 603 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
594 release_mem_region(mem->start, (mem->end - mem->start) + 1); 604 release_mem_region(mem->start, resource_size(mem));
595 return 0; 605 return 0;
596} 606}
597 607
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index e4476743f203..b1bc6e277d2a 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -85,10 +85,11 @@ static void dump_iic_regs(const char* header, struct ibm_iic_private* dev)
85{ 85{
86 volatile struct iic_regs __iomem *iic = dev->vaddr; 86 volatile struct iic_regs __iomem *iic = dev->vaddr;
87 printk(KERN_DEBUG "ibm-iic%d: %s\n", dev->idx, header); 87 printk(KERN_DEBUG "ibm-iic%d: %s\n", dev->idx, header);
88 printk(KERN_DEBUG " cntl = 0x%02x, mdcntl = 0x%02x\n" 88 printk(KERN_DEBUG
89 KERN_DEBUG " sts = 0x%02x, extsts = 0x%02x\n" 89 " cntl = 0x%02x, mdcntl = 0x%02x\n"
90 KERN_DEBUG " clkdiv = 0x%02x, xfrcnt = 0x%02x\n" 90 " sts = 0x%02x, extsts = 0x%02x\n"
91 KERN_DEBUG " xtcntlss = 0x%02x, directcntl = 0x%02x\n", 91 " clkdiv = 0x%02x, xfrcnt = 0x%02x\n"
92 " xtcntlss = 0x%02x, directcntl = 0x%02x\n",
92 in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), 93 in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts),
93 in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), 94 in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt),
94 in_8(&iic->xtcntlss), in_8(&iic->directcntl)); 95 in_8(&iic->xtcntlss), in_8(&iic->directcntl));
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index ad8d2010c921..d258b02aef44 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -672,9 +672,10 @@ omap_i2c_isr(int this_irq, void *dev_id)
672 break; 672 break;
673 } 673 }
674 674
675 err = 0;
676complete:
675 omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); 677 omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
676 678
677 err = 0;
678 if (stat & OMAP_I2C_STAT_NACK) { 679 if (stat & OMAP_I2C_STAT_NACK) {
679 err |= OMAP_I2C_STAT_NACK; 680 err |= OMAP_I2C_STAT_NACK;
680 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 681 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
@@ -685,16 +686,19 @@ omap_i2c_isr(int this_irq, void *dev_id)
685 err |= OMAP_I2C_STAT_AL; 686 err |= OMAP_I2C_STAT_AL;
686 } 687 }
687 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 688 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
688 OMAP_I2C_STAT_AL)) 689 OMAP_I2C_STAT_AL)) {
689 omap_i2c_complete_cmd(dev, err); 690 omap_i2c_complete_cmd(dev, err);
691 return IRQ_HANDLED;
692 }
690 if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { 693 if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) {
691 u8 num_bytes = 1; 694 u8 num_bytes = 1;
692 if (dev->fifo_size) { 695 if (dev->fifo_size) {
693 if (stat & OMAP_I2C_STAT_RRDY) 696 if (stat & OMAP_I2C_STAT_RRDY)
694 num_bytes = dev->fifo_size; 697 num_bytes = dev->fifo_size;
695 else 698 else /* read RXSTAT on RDR interrupt */
696 num_bytes = omap_i2c_read_reg(dev, 699 num_bytes = (omap_i2c_read_reg(dev,
697 OMAP_I2C_BUFSTAT_REG); 700 OMAP_I2C_BUFSTAT_REG)
701 >> 8) & 0x3F;
698 } 702 }
699 while (num_bytes) { 703 while (num_bytes) {
700 num_bytes--; 704 num_bytes--;
@@ -731,9 +735,10 @@ omap_i2c_isr(int this_irq, void *dev_id)
731 if (dev->fifo_size) { 735 if (dev->fifo_size) {
732 if (stat & OMAP_I2C_STAT_XRDY) 736 if (stat & OMAP_I2C_STAT_XRDY)
733 num_bytes = dev->fifo_size; 737 num_bytes = dev->fifo_size;
734 else 738 else /* read TXSTAT on XDR interrupt */
735 num_bytes = omap_i2c_read_reg(dev, 739 num_bytes = omap_i2c_read_reg(dev,
736 OMAP_I2C_BUFSTAT_REG); 740 OMAP_I2C_BUFSTAT_REG)
741 & 0x3F;
737 } 742 }
738 while (num_bytes) { 743 while (num_bytes) {
739 num_bytes--; 744 num_bytes--;
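The new shifts and masks above imply the BUFSTAT layout the handler relies on: the TX byte count sits in bits 5:0 and the RX byte count in bits 13:8 (layout inferred from the patch, not quoted from the TRM). Expressed as hypothetical helpers:

#define OMAP_I2C_BUFSTAT_TXSTAT(v)	((v) & 0x3F)		/* bytes left to transmit   */
#define OMAP_I2C_BUFSTAT_RXSTAT(v)	(((v) >> 8) & 0x3F)	/* bytes available to read  */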
@@ -760,6 +765,27 @@ omap_i2c_isr(int this_irq, void *dev_id)
760 "data to send\n"); 765 "data to send\n");
761 break; 766 break;
762 } 767 }
768
769 /*
770 * OMAP3430 Errata 1.153: When an XRDY/XDR
771 * is hit, wait for XUDF before writing data
772 * to DATA_REG. Otherwise some data bytes can
773 * be lost while transferring them from the
774 * memory to the I2C interface.
775 */
776
777 if (cpu_is_omap34xx()) {
778 while (!(stat & OMAP_I2C_STAT_XUDF)) {
779 if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
780 omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
781 err |= OMAP_I2C_STAT_XUDF;
782 goto complete;
783 }
784 cpu_relax();
785 stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
786 }
787 }
788
763 omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); 789 omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
764 } 790 }
765 omap_i2c_ack_stat(dev, 791 omap_i2c_ack_stat(dev,
@@ -806,7 +832,7 @@ omap_i2c_probe(struct platform_device *pdev)
806 return -ENODEV; 832 return -ENODEV;
807 } 833 }
808 834
809 ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1, 835 ioarea = request_mem_region(mem->start, resource_size(mem),
810 pdev->name); 836 pdev->name);
811 if (!ioarea) { 837 if (!ioarea) {
812 dev_err(&pdev->dev, "I2C region already claimed\n"); 838 dev_err(&pdev->dev, "I2C region already claimed\n");
@@ -879,7 +905,7 @@ omap_i2c_probe(struct platform_device *pdev)
879 i2c_set_adapdata(adap, dev); 905 i2c_set_adapdata(adap, dev);
880 adap->owner = THIS_MODULE; 906 adap->owner = THIS_MODULE;
881 adap->class = I2C_CLASS_HWMON; 907 adap->class = I2C_CLASS_HWMON;
882 strncpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); 908 strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
883 adap->algo = &omap_i2c_algo; 909 adap->algo = &omap_i2c_algo;
884 adap->dev.parent = &pdev->dev; 910 adap->dev.parent = &pdev->dev;
885 911
@@ -905,7 +931,7 @@ err_free_mem:
905 platform_set_drvdata(pdev, NULL); 931 platform_set_drvdata(pdev, NULL);
906 kfree(dev); 932 kfree(dev);
907err_release_region: 933err_release_region:
908 release_mem_region(mem->start, (mem->end - mem->start) + 1); 934 release_mem_region(mem->start, resource_size(mem));
909 935
910 return r; 936 return r;
911} 937}
@@ -925,7 +951,7 @@ omap_i2c_remove(struct platform_device *pdev)
925 iounmap(dev->base); 951 iounmap(dev->base);
926 kfree(dev); 952 kfree(dev);
927 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 953 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
928 release_mem_region(mem->start, (mem->end - mem->start) + 1); 954 release_mem_region(mem->start, resource_size(mem));
929 return 0; 955 return 0;
930} 956}
931 957
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 8f42a4536cdf..20bb0ceb027b 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -763,11 +763,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
763 dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq); 763 dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq);
764 dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon); 764 dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon);
765 765
766 /* check for s3c2440 i2c controller */
767
768 if (s3c24xx_i2c_is2440(i2c))
769 writel(0x0, i2c->regs + S3C2440_IICLC);
770
771 return 0; 766 return 0;
772} 767}
773 768
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 1c01083b01b5..820487d0d5c7 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -563,7 +563,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
563 goto err_irq; 563 goto err_irq;
564 } 564 }
565 565
566 size = (res->end - res->start) + 1; 566 size = resource_size(res);
567 567
568 pd->reg = ioremap(res->start, size); 568 pd->reg = ioremap(res->start, size);
569 if (pd->reg == NULL) { 569 if (pd->reg == NULL) {
@@ -637,7 +637,7 @@ static void __exit sh_mobile_i2c_adap_exit(void)
637 platform_driver_unregister(&sh_mobile_i2c_driver); 637 platform_driver_unregister(&sh_mobile_i2c_driver);
638} 638}
639 639
640module_init(sh_mobile_i2c_adap_init); 640subsys_initcall(sh_mobile_i2c_adap_init);
641module_exit(sh_mobile_i2c_adap_exit); 641module_exit(sh_mobile_i2c_adap_exit);
642 642
643MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); 643MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 042fda295f3a..6407f47bda82 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -92,7 +92,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
92 goto err; 92 goto err;
93 } 93 }
94 94
95 size = (res->end-res->start)+1; 95 size = resource_size(res);
96 96
97 pd->ioarea = request_mem_region(res->start, size, dev->name); 97 pd->ioarea = request_mem_region(res->start, size, dev->name);
98 if (pd->ioarea == NULL) { 98 if (pd->ioarea == NULL) {
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
index 1a9cc135219f..b96f3025e588 100644
--- a/drivers/i2c/chips/tsl2550.c
+++ b/drivers/i2c/chips/tsl2550.c
@@ -27,7 +27,7 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28 28
29#define TSL2550_DRV_NAME "tsl2550" 29#define TSL2550_DRV_NAME "tsl2550"
30#define DRIVER_VERSION "1.1.1" 30#define DRIVER_VERSION "1.1.2"
31 31
32/* 32/*
33 * Defines 33 * Defines
@@ -189,13 +189,16 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
189 u8 r = 128; 189 u8 r = 128;
190 190
191 /* Avoid division by 0 and count 1 cannot be greater than count 0 */ 191 /* Avoid division by 0 and count 1 cannot be greater than count 0 */
192 if (c0 && (c1 <= c0)) 192 if (c1 <= c0)
193 r = c1 * 128 / c0; 193 if (c0) {
194 r = c1 * 128 / c0;
195
196 /* Calculate LUX */
197 lux = ((c0 - c1) * ratio_lut[r]) / 256;
198 } else
199 lux = 0;
194 else 200 else
195 return -1; 201 return -EAGAIN;
196
197 /* Calculate LUX */
198 lux = ((c0 - c1) * ratio_lut[r]) / 256;
199 202
200 /* LUX range check */ 203 /* LUX range check */
201 return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; 204 return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
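
The restructured tsl2550_calculate_lux() changes two behaviours: a dark sample (both counts zero) now yields 0 lux instead of an error, and an inconsistent sample (count 1 greater than count 0) now returns -EAGAIN so the caller can retry. A condensed sketch of the new decision flow (equivalent logic, not the driver's literal code):

	if (c1 > c0)
		return -EAGAIN;		/* inconsistent sample, let the caller retry */
	if (c0 == 0)
		return 0;		/* both counts zero: report darkness */
	r = c1 * 128 / c0;
	lux = ((c0 - c1) * ratio_lut[r]) / 256;
	return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
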
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index bd066bb9d611..09f98ed0731f 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -135,6 +135,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
135 135
136 ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); 136 ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
137 hw[0].irq = 14; 137 hw[0].irq = 14;
138 hw[1].irq = 15;
138 139
139 return ide_host_add(d, hws, 2, NULL); 140 return ide_host_add(d, hws, 2, NULL);
140} 141}
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 77f79d26b264..c509c9916464 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -92,6 +92,11 @@ int ide_acpi_init(void)
92 return 0; 92 return 0;
93} 93}
94 94
95bool ide_port_acpi(ide_hwif_t *hwif)
96{
97 return ide_noacpi == 0 && hwif->acpidata;
98}
99
95/** 100/**
96 * ide_get_dev_handle - finds acpi_handle and PCI device.function 101 * ide_get_dev_handle - finds acpi_handle and PCI device.function
97 * @dev: device to locate 102 * @dev: device to locate
@@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive)
352 unsigned long gtf_address; 357 unsigned long gtf_address;
353 unsigned long obj_loc; 358 unsigned long obj_loc;
354 359
355 if (ide_noacpi)
356 return 0;
357
358 DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn); 360 DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
359 361
360 ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc); 362 ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
@@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
389 struct acpi_buffer output; 391 struct acpi_buffer output;
390 union acpi_object *out_obj; 392 union acpi_object *out_obj;
391 393
392 if (ide_noacpi)
393 return;
394
395 DEBPRINT("ENTER:\n");
396
397 if (!hwif->acpidata) {
398 DEBPRINT("no ACPI data for %s\n", hwif->name);
399 return;
400 }
401
402 /* Setting up output buffer for _GTM */ 394 /* Setting up output buffer for _GTM */
403 output.length = ACPI_ALLOCATE_BUFFER; 395 output.length = ACPI_ALLOCATE_BUFFER;
404 output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ 396 output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
@@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
479 struct ide_acpi_drive_link *master = &hwif->acpidata->master; 471 struct ide_acpi_drive_link *master = &hwif->acpidata->master;
480 struct ide_acpi_drive_link *slave = &hwif->acpidata->slave; 472 struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
481 473
482 if (ide_noacpi)
483 return;
484
485 DEBPRINT("ENTER:\n");
486
487 if (!hwif->acpidata) {
488 DEBPRINT("no ACPI data for %s\n", hwif->name);
489 return;
490 }
491
492 /* Give the GTM buffer + drive Identify data to the channel via the 474 /* Give the GTM buffer + drive Identify data to the channel via the
493 * _STM method: */ 475 * _STM method: */
494 /* setup input parameters buffer for _STM */ 476 /* setup input parameters buffer for _STM */
@@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
527 ide_drive_t *drive; 509 ide_drive_t *drive;
528 int i; 510 int i;
529 511
530 if (ide_noacpi || ide_noacpi_psx) 512 if (ide_noacpi_psx)
531 return; 513 return;
532 514
533 DEBPRINT("ENTER:\n"); 515 DEBPRINT("ENTER:\n");
534 516
535 if (!hwif->acpidata) {
536 DEBPRINT("no ACPI data for %s\n", hwif->name);
537 return;
538 }
539
540 /* channel first and then drives for power on and verse versa for power off */ 517 /* channel first and then drives for power on and verse versa for power off */
541 if (on) 518 if (on)
542 acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0); 519 acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
@@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
616 drive->name, err); 593 drive->name, err);
617 } 594 }
618 595
619 if (!ide_acpionboot) { 596 if (ide_noacpi || ide_acpionboot == 0) {
620 DEBPRINT("ACPI methods disabled on boot\n"); 597 DEBPRINT("ACPI methods disabled on boot\n");
621 return; 598 return;
622 } 599 }
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 4a19686fcfe9..6a9a769bffc1 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
592 } 592 }
593 } else if (!blk_pc_request(rq)) { 593 } else if (!blk_pc_request(rq)) {
594 ide_cd_request_sense_fixup(drive, cmd); 594 ide_cd_request_sense_fixup(drive, cmd);
595 /* complain if we still have data left to transfer */ 595
596 uptodate = cmd->nleft ? 0 : 1; 596 uptodate = cmd->nleft ? 0 : 1;
597 if (uptodate == 0) 597
598 /*
599 * suck out the remaining bytes from the drive in an
600 * attempt to complete the data xfer. (see BZ#13399)
601 */
602 if (!(stat & ATA_ERR) && !uptodate && thislen) {
603 ide_pio_bytes(drive, cmd, write, thislen);
604 uptodate = cmd->nleft ? 0 : 1;
605 }
606
607 if (!uptodate)
598 rq->cmd_flags |= REQ_FAILED; 608 rq->cmd_flags |= REQ_FAILED;
599 } 609 }
600 goto out_end; 610 goto out_end;
@@ -876,9 +886,12 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
876 return stat; 886 return stat;
877 887
878 /* 888 /*
879 * Sanity check the given block size 889 * Sanity check the given block size, in so far as making
890 * sure the sectors_per_frame we give to the caller won't
891 * end up being bogus.
880 */ 892 */
881 blocklen = be32_to_cpu(capbuf.blocklen); 893 blocklen = be32_to_cpu(capbuf.blocklen);
894 blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS;
882 switch (blocklen) { 895 switch (blocklen) {
883 case 512: 896 case 512:
884 case 1024: 897 case 1024:
@@ -886,10 +899,9 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
886 case 4096: 899 case 4096:
887 break; 900 break;
888 default: 901 default:
889 printk(KERN_ERR PFX "%s: weird block size %u\n", 902 printk_once(KERN_ERR PFX "%s: weird block size %u; "
903 "setting default block size to 2048\n",
890 drive->name, blocklen); 904 drive->name, blocklen);
891 printk(KERN_ERR PFX "%s: default to 2kb block size\n",
892 drive->name);
893 blocklen = 2048; 905 blocklen = 2048;
894 break; 906 break;
895 } 907 }
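
The added shift pair in cdrom_read_capacity() rounds the drive-reported block length down to a multiple of the sector size before it is validated, so an almost-sane value cannot produce a bogus sectors_per_frame. A worked example, assuming the IDE layer's usual SECTOR_BITS of 9 (512-byte sectors):

	blocklen = 2340;
	blocklen = (blocklen >> 9) << 9;	/* 2340 / 512 = 4 (truncated), 4 * 512 = 2048 */
	/* 2048 is then accepted by the switch; a value such as 2340 previously
	 * fell through to the "weird block size" default */
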
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 5bf958e5b1d5..1099bf7cf968 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
183 err = setfunc(drive, *(int *)&rq->cmd[1]); 183 err = setfunc(drive, *(int *)&rq->cmd[1]);
184 if (err) 184 if (err)
185 rq->errors = err; 185 rq->errors = err;
186 ide_complete_rq(drive, err, ide_rq_bytes(rq)); 186 ide_complete_rq(drive, err, blk_rq_bytes(rq));
187 return ide_stopped; 187 return ide_stopped;
188} 188}
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 695181120cdb..7f878017b736 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -455,6 +455,7 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
455 455
456 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 456 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
457 rq->special = cmd; 457 rq->special = cmd;
458 cmd->rq = rq;
458} 459}
459 460
460ide_devset_get(multcount, mult_count); 461ide_devset_get(multcount, mult_count);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 219e6fb78dc6..ee58c88dee5a 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -361,9 +361,6 @@ static int ide_tune_dma(ide_drive_t *drive)
361 if (__ide_dma_bad_drive(drive)) 361 if (__ide_dma_bad_drive(drive))
362 return 0; 362 return 0;
363 363
364 if (ide_id_dma_bug(drive))
365 return 0;
366
367 if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) 364 if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
368 return config_drive_for_dma(drive); 365 return config_drive_for_dma(drive);
369 366
@@ -394,24 +391,6 @@ static int ide_dma_check(ide_drive_t *drive)
394 return -1; 391 return -1;
395} 392}
396 393
397int ide_id_dma_bug(ide_drive_t *drive)
398{
399 u16 *id = drive->id;
400
401 if (id[ATA_ID_FIELD_VALID] & 4) {
402 if ((id[ATA_ID_UDMA_MODES] >> 8) &&
403 (id[ATA_ID_MWDMA_MODES] >> 8))
404 goto err_out;
405 } else if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
406 (id[ATA_ID_SWDMA_MODES] >> 8))
407 goto err_out;
408
409 return 0;
410err_out:
411 printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
412 return 1;
413}
414
415int ide_set_dma(ide_drive_t *drive) 394int ide_set_dma(ide_drive_t *drive)
416{ 395{
417 int rc; 396 int rc;
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 2b9141979613..e9abf2c3c335 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
149 if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) { 149 if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
150 if (err <= 0 && rq->errors == 0) 150 if (err <= 0 && rq->errors == 0)
151 rq->errors = -EIO; 151 rq->errors = -EIO;
152 ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq)); 152 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
153 } 153 }
154} 154}
155 155
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8b3f204f7d73..fefbdfc8db06 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -293,7 +293,7 @@ out_end:
293 drive->failed_pc = NULL; 293 drive->failed_pc = NULL;
294 if (blk_fs_request(rq) == 0 && rq->errors == 0) 294 if (blk_fs_request(rq) == 0 && rq->errors == 0)
295 rq->errors = -EIO; 295 rq->errors = -EIO;
296 ide_complete_rq(drive, -EIO, ide_rq_bytes(rq)); 296 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
297 return ide_stopped; 297 return ide_stopped;
298} 298}
299 299
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1059f809b809..db96138fefcd 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
112 } 112 }
113} 113}
114 114
115/* obsolete, blk_rq_bytes() should be used instead */
116unsigned int ide_rq_bytes(struct request *rq)
117{
118 if (blk_pc_request(rq))
119 return blk_rq_bytes(rq);
120 else
121 return blk_rq_cur_sectors(rq) << 9;
122}
123EXPORT_SYMBOL_GPL(ide_rq_bytes);
124
125int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes) 115int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
126{ 116{
127 ide_hwif_t *hwif = drive->hwif; 117 ide_hwif_t *hwif = drive->hwif;
@@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
152 142
153 if ((media == ide_floppy || media == ide_tape) && drv_req) { 143 if ((media == ide_floppy || media == ide_tape) && drv_req) {
154 rq->errors = 0; 144 rq->errors = 0;
155 ide_complete_rq(drive, 0, blk_rq_bytes(rq));
156 } else { 145 } else {
157 if (media == ide_tape) 146 if (media == ide_tape)
158 rq->errors = IDE_DRV_ERROR_GENERAL; 147 rq->errors = IDE_DRV_ERROR_GENERAL;
159 else if (blk_fs_request(rq) == 0 && rq->errors == 0) 148 else if (blk_fs_request(rq) == 0 && rq->errors == 0)
160 rq->errors = -EIO; 149 rq->errors = -EIO;
161 ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
162 } 150 }
151
152 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
163} 153}
164 154
165static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) 155static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 82f252c3ee6e..e246d3d3fbcc 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
64 goto out; 64 goto out;
65 } 65 }
66 66
67 id = kmalloc(size, GFP_KERNEL); 67 /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
68 id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
68 if (id == NULL) { 69 if (id == NULL) {
69 rc = -ENOMEM; 70 rc = -ENOMEM;
70 goto out; 71 goto out;
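
The ioctl fix above allocates the full identify block instead of the caller-visible size because, as the new comment notes, ata_id_to_hd_driveid() relies on 'id' being fully allocated. ATA identify data is a fixed ATA_ID_WORDS (256) 16-bit words, so:

	id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);	/* 256 words * 2 bytes = 512 bytes */

which is always at least as large as the number of bytes later copied to user space.
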
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index fa047150a1c6..2892b242bbe1 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -210,6 +210,7 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
210 */ 210 */
211static const struct drive_list_entry ivb_list[] = { 211static const struct drive_list_entry ivb_list[] = {
212 { "QUANTUM FIREBALLlct10 05" , "A03.0900" }, 212 { "QUANTUM FIREBALLlct10 05" , "A03.0900" },
213 { "QUANTUM FIREBALLlct20 30" , "APL.0900" },
213 { "TSSTcorp CDDVDW SH-S202J" , "SB00" }, 214 { "TSSTcorp CDDVDW SH-S202J" , "SB00" },
214 { "TSSTcorp CDDVDW SH-S202J" , "SB01" }, 215 { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
215 { "TSSTcorp CDDVDW SH-S202N" , "SB00" }, 216 { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
@@ -329,9 +330,6 @@ int ide_driveid_update(ide_drive_t *drive)
329 330
330 kfree(id); 331 kfree(id);
331 332
332 if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
333 ide_dma_off(drive);
334
335 return 1; 333 return 1;
336out_err: 334out_err:
337 if (rc == 2) 335 if (rc == 2)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index c14ca144cffe..ad7be2669dcb 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
10 struct request_pm_state rqpm; 10 struct request_pm_state rqpm;
11 int ret; 11 int ret;
12 12
13 /* call ACPI _GTM only once */ 13 if (ide_port_acpi(hwif)) {
14 if ((drive->dn & 1) == 0 || pair == NULL) 14 /* call ACPI _GTM only once */
15 ide_acpi_get_timing(hwif); 15 if ((drive->dn & 1) == 0 || pair == NULL)
16 ide_acpi_get_timing(hwif);
17 }
16 18
17 memset(&rqpm, 0, sizeof(rqpm)); 19 memset(&rqpm, 0, sizeof(rqpm));
18 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 20 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
@@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
26 ret = blk_execute_rq(drive->queue, NULL, rq, 0); 28 ret = blk_execute_rq(drive->queue, NULL, rq, 0);
27 blk_put_request(rq); 29 blk_put_request(rq);
28 30
29 /* call ACPI _PS3 only after both devices are suspended */ 31 if (ret == 0 && ide_port_acpi(hwif)) {
30 if (ret == 0 && ((drive->dn & 1) || pair == NULL)) 32 /* call ACPI _PS3 only after both devices are suspended */
31 ide_acpi_set_state(hwif, 0); 33 if ((drive->dn & 1) || pair == NULL)
34 ide_acpi_set_state(hwif, 0);
35 }
32 36
33 return ret; 37 return ret;
34} 38}
@@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev)
42 struct request_pm_state rqpm; 46 struct request_pm_state rqpm;
43 int err; 47 int err;
44 48
45 /* call ACPI _PS0 / _STM only once */ 49 if (ide_port_acpi(hwif)) {
46 if ((drive->dn & 1) == 0 || pair == NULL) { 50 /* call ACPI _PS0 / _STM only once */
47 ide_acpi_set_state(hwif, 1); 51 if ((drive->dn & 1) == 0 || pair == NULL) {
48 ide_acpi_push_timing(hwif); 52 ide_acpi_set_state(hwif, 1);
49 } 53 ide_acpi_push_timing(hwif);
54 }
50 55
51 ide_acpi_exec_tfs(drive); 56 ide_acpi_exec_tfs(drive);
57 }
52 58
53 memset(&rqpm, 0, sizeof(rqpm)); 59 memset(&rqpm, 0, sizeof(rqpm));
54 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 60 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 51af4eea0d36..1bb106f6221a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -818,6 +818,24 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
818 return j; 818 return j;
819} 819}
820 820
821static void ide_host_enable_irqs(struct ide_host *host)
822{
823 ide_hwif_t *hwif;
824 int i;
825
826 ide_host_for_each_port(i, hwif, host) {
827 if (hwif == NULL)
828 continue;
829
830 /* clear any pending IRQs */
831 hwif->tp_ops->read_status(hwif);
832
833 /* unmask IRQs */
834 if (hwif->io_ports.ctl_addr)
835 hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
836 }
837}
838
821/* 839/*
822 * This routine sets up the IRQ for an IDE interface. 840 * This routine sets up the IRQ for an IDE interface.
823 */ 841 */
@@ -831,9 +849,6 @@ static int init_irq (ide_hwif_t *hwif)
831 if (irq_handler == NULL) 849 if (irq_handler == NULL)
832 irq_handler = ide_intr; 850 irq_handler = ide_intr;
833 851
834 if (io_ports->ctl_addr)
835 hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
836
837 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) 852 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
838 goto out_up; 853 goto out_up;
839 854
@@ -1404,6 +1419,8 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1404 ide_port_tune_devices(hwif); 1419 ide_port_tune_devices(hwif);
1405 } 1420 }
1406 1421
1422 ide_host_enable_irqs(host);
1423
1407 ide_host_for_each_port(i, hwif, host) { 1424 ide_host_for_each_port(i, hwif, host) {
1408 if (hwif == NULL) 1425 if (hwif == NULL)
1409 continue; 1426 continue;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 013dc595fab6..bc5fb12b913c 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1064,6 +1064,7 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
1064 tape->best_dsc_rw_freq = config.dsc_rw_frequency; 1064 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
1065 break; 1065 break;
1066 case 0x0350: 1066 case 0x0350:
1067 memset(&config, 0, sizeof(config));
1067 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq; 1068 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
1068 config.nr_stages = 1; 1069 config.nr_stages = 1;
1069 if (copy_to_user(argp, &config, sizeof(config))) 1070 if (copy_to_user(argp, &config, sizeof(config)))
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 83b734aec923..52b25f8b111d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -880,6 +880,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
880 } 880 }
881 881
882 shost->hostdata[0] = (unsigned long)lu; 882 shost->hostdata[0] = (unsigned long)lu;
883 shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
883 884
884 if (!scsi_add_host(shost, &ud->device)) { 885 if (!scsi_add_host(shost, &ud->device)) {
885 lu->shost = shost; 886 lu->shost = shost;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index c5036f1cc5b0..64a3a66a8a39 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -25,6 +25,12 @@
25#define SBP2_DEVICE_NAME "sbp2" 25#define SBP2_DEVICE_NAME "sbp2"
26 26
27/* 27/*
28 * There is no transport protocol limit to the CDB length, but we implement
29 * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
30 */
31#define SBP2_MAX_CDB_SIZE 16
32
33/*
28 * SBP-2 specific definitions 34 * SBP-2 specific definitions
29 */ 35 */
30 36
@@ -51,7 +57,7 @@ struct sbp2_command_orb {
51 u32 data_descriptor_hi; 57 u32 data_descriptor_hi;
52 u32 data_descriptor_lo; 58 u32 data_descriptor_lo;
53 u32 misc; 59 u32 misc;
54 u8 cdb[12]; 60 u8 cdb[SBP2_MAX_CDB_SIZE];
55} __attribute__((packed)); 61} __attribute__((packed));
56 62
57#define SBP2_LOGIN_REQUEST 0x0 63#define SBP2_LOGIN_REQUEST 0x0
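
The 16-byte CDB limit corresponds to the SCSI READ(16)/WRITE(16) commands, which become necessary once a logical block address no longer fits the 32-bit LBA field of READ(10). With 512-byte blocks that boundary works out to

	2^32 blocks * 512 bytes/block = 2 TiB

which is why the comment says 16 bytes is enough for disks larger than 2 TB.
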
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 114efd8dc8f5..1148140d08a1 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -608,8 +608,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
608 p, compat_mode); 608 p, compat_mode);
609 609
610 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) 610 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0)))
611 return str_to_user(dev_name(&evdev->dev), 611 return str_to_user(dev->name, _IOC_SIZE(cmd), p);
612 _IOC_SIZE(cmd), p);
613 612
614 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) 613 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0)))
615 return str_to_user(dev->phys, _IOC_SIZE(cmd), p); 614 return str_to_user(dev->phys, _IOC_SIZE(cmd), p);
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 0e12f89276a3..4cfd084fa897 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -536,7 +536,7 @@ static int joydev_ioctl_common(struct joydev *joydev,
536 default: 536 default:
537 if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) { 537 if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) {
538 int len; 538 int len;
539 const char *name = dev_name(&dev->dev); 539 const char *name = dev->name;
540 540
541 if (!name) 541 if (!name)
542 return 0; 542 return 0;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index b868b8d5fbb3..f155ad8cdae7 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -470,20 +470,20 @@ static void xpad_irq_out(struct urb *urb)
470 status = urb->status; 470 status = urb->status;
471 471
472 switch (status) { 472 switch (status) {
473 case 0: 473 case 0:
474 /* success */ 474 /* success */
475 break; 475 return;
476 case -ECONNRESET: 476
477 case -ENOENT: 477 case -ECONNRESET:
478 case -ESHUTDOWN: 478 case -ENOENT:
479 /* this urb is terminated, clean up */ 479 case -ESHUTDOWN:
480 dbg("%s - urb shutting down with status: %d", 480 /* this urb is terminated, clean up */
481 __func__, status); 481 dbg("%s - urb shutting down with status: %d", __func__, status);
482 return; 482 return;
483 default: 483
484 dbg("%s - nonzero urb status received: %d", 484 default:
485 __func__, status); 485 dbg("%s - nonzero urb status received: %d", __func__, status);
486 goto exit; 486 goto exit;
487 } 487 }
488 488
489exit: 489exit:
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 9d8f796c6745..a6b989a9dc07 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -12,6 +12,42 @@ menuconfig INPUT_KEYBOARD
12 12
13if INPUT_KEYBOARD 13if INPUT_KEYBOARD
14 14
15config KEYBOARD_AAED2000
16 tristate "AAED-2000 keyboard"
17 depends on MACH_AAED2000
18 select INPUT_POLLDEV
19 default y
20 help
21 Say Y here to enable the keyboard on the Agilent AAED-2000
22 development board.
23
24 To compile this driver as a module, choose M here: the
25 module will be called aaed2000_kbd.
26
27config KEYBOARD_AMIGA
28 tristate "Amiga keyboard"
29 depends on AMIGA
30 help
31 Say Y here if you are running Linux on any AMIGA and have a keyboard
32 attached.
33
34 To compile this driver as a module, choose M here: the
35 module will be called amikbd.
36
37config ATARI_KBD_CORE
38 bool
39
40config KEYBOARD_ATARI
41 tristate "Atari keyboard"
42 depends on ATARI
43 select ATARI_KBD_CORE
44 help
45 Say Y here if you are running Linux on any Atari and have a keyboard
46 attached.
47
48 To compile this driver as a module, choose M here: the
49 module will be called atakbd.
50
15config KEYBOARD_ATKBD 51config KEYBOARD_ATKBD
16 tristate "AT keyboard" if EMBEDDED || !X86 52 tristate "AT keyboard" if EMBEDDED || !X86
17 default y 53 default y
@@ -68,69 +104,14 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
68 right-hand column will be interpreted as the key shown in the 104 right-hand column will be interpreted as the key shown in the
69 left-hand column. 105 left-hand column.
70 106
71config KEYBOARD_SUNKBD 107config KEYBOARD_BFIN
72 tristate "Sun Type 4 and Type 5 keyboard" 108 tristate "Blackfin BF54x keypad support"
73 select SERIO 109 depends on (BF54x && !BF544)
74 help
75 Say Y here if you want to use a Sun Type 4 or Type 5 keyboard,
76 connected either to the Sun keyboard connector or to an serial
77 (RS-232) port via a simple adapter.
78
79 To compile this driver as a module, choose M here: the
80 module will be called sunkbd.
81
82config KEYBOARD_LKKBD
83 tristate "DECstation/VAXstation LK201/LK401 keyboard"
84 select SERIO
85 help
86 Say Y here if you want to use a LK201 or LK401 style serial
87 keyboard. This keyboard is also useable on PCs if you attach
88 it with the inputattach program. The connector pinout is
89 described within lkkbd.c.
90
91 To compile this driver as a module, choose M here: the
92 module will be called lkkbd.
93
94config KEYBOARD_LOCOMO
95 tristate "LoCoMo Keyboard Support"
96 depends on SHARP_LOCOMO && INPUT_KEYBOARD
97 help
98 Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA
99
100 To compile this driver as a module, choose M here: the
101 module will be called locomokbd.
102
103config KEYBOARD_XTKBD
104 tristate "XT keyboard"
105 select SERIO
106 help
107 Say Y here if you want to use the old IBM PC/XT keyboard (or
108 compatible) on your system. This is only possible with a
109 parallel port keyboard adapter, you cannot connect it to the
110 keyboard port on a PC that runs Linux.
111
112 To compile this driver as a module, choose M here: the
113 module will be called xtkbd.
114
115config KEYBOARD_NEWTON
116 tristate "Newton keyboard"
117 select SERIO
118 help
119 Say Y here if you have a Newton keyboard on a serial port.
120
121 To compile this driver as a module, choose M here: the
122 module will be called newtonkbd.
123
124config KEYBOARD_STOWAWAY
125 tristate "Stowaway keyboard"
126 select SERIO
127 help 110 help
128 Say Y here if you have a Stowaway keyboard on a serial port. 111 Say Y here if you want to use the BF54x keypad.
129 Stowaway compatible keyboards like Dicota Input-PDA keyboard
130 are also supported by this driver.
131 112
132 To compile this driver as a module, choose M here: the 113 To compile this driver as a module, choose M here: the
133 module will be called stowaway. 114 module will be called bf54x-keys.
134 115
135config KEYBOARD_CORGI 116config KEYBOARD_CORGI
136 tristate "Corgi keyboard" 117 tristate "Corgi keyboard"
@@ -143,61 +124,50 @@ config KEYBOARD_CORGI
143 To compile this driver as a module, choose M here: the 124 To compile this driver as a module, choose M here: the
144 module will be called corgikbd. 125 module will be called corgikbd.
145 126
146config KEYBOARD_SPITZ 127config KEYBOARD_LKKBD
147 tristate "Spitz keyboard" 128 tristate "DECstation/VAXstation LK201/LK401 keyboard"
148 depends on PXA_SHARPSL 129 select SERIO
149 default y
150 help 130 help
151 Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000, 131 Say Y here if you want to use a LK201 or LK401 style serial
152 SL-C3000 and Sl-C3100 series of PDAs. 132 keyboard. This keyboard is also useable on PCs if you attach
133 it with the inputattach program. The connector pinout is
134 described within lkkbd.c.
153 135
154 To compile this driver as a module, choose M here: the 136 To compile this driver as a module, choose M here: the
155 module will be called spitzkbd. 137 module will be called lkkbd.
156 138
157config KEYBOARD_TOSA 139config KEYBOARD_EP93XX
158 tristate "Tosa keyboard" 140 tristate "EP93xx Matrix Keypad support"
159 depends on MACH_TOSA 141 depends on ARCH_EP93XX
160 default y
161 help 142 help
162 Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa) 143 Say Y here to enable the matrix keypad on the Cirrus EP93XX.
163 144
164 To compile this driver as a module, choose M here: the 145 To compile this driver as a module, choose M here: the
165 module will be called tosakbd. 146 module will be called ep93xx_keypad.
166 147
167config KEYBOARD_TOSA_USE_EXT_KEYCODES 148config KEYBOARD_GPIO
168 bool "Tosa keyboard: use extended keycodes" 149 tristate "GPIO Buttons"
169 depends on KEYBOARD_TOSA 150 depends on GENERIC_GPIO
170 default n
171 help 151 help
172 Say Y here to enable the tosa keyboard driver to generate extended 152 This driver implements support for buttons connected
173 (>= 127) keycodes. Be aware, that they can't be correctly interpreted 153 to GPIO pins of various CPUs (and some other chips).
174 by either console keyboard driver or by Kdrive keybd driver.
175
176 Say Y only if you know, what you are doing!
177 154
178config KEYBOARD_AMIGA 155 Say Y here if your device has buttons connected
179 tristate "Amiga keyboard" 156 directly to such GPIO pins. Your board-specific
180 depends on AMIGA 157 setup logic must also provide a platform device,
181 help 158 with configuration data saying which GPIOs are used.
182 Say Y here if you are running Linux on any AMIGA and have a keyboard
183 attached.
184 159
185 To compile this driver as a module, choose M here: the 160 To compile this driver as a module, choose M here: the
186 module will be called amikbd. 161 module will be called gpio_keys.
187 162
188config ATARI_KBD_CORE 163config KEYBOARD_MATRIX
189 bool 164 tristate "GPIO driven matrix keypad support"
190 165 depends on GENERIC_GPIO
191config KEYBOARD_ATARI
192 tristate "Atari keyboard"
193 depends on ATARI
194 select ATARI_KBD_CORE
195 help 166 help
196 Say Y here if you are running Linux on any Atari and have a keyboard 167 Enable support for GPIO driven matrix keypad.
197 attached.
198 168
199 To compile this driver as a module, choose M here: the 169 To compile this driver as a module, choose M here: the
200 module will be called atakbd. 170 module will be called matrix_keypad.
201 171
202config KEYBOARD_HIL_OLD 172config KEYBOARD_HIL_OLD
203 tristate "HP HIL keyboard support (simple driver)" 173 tristate "HP HIL keyboard support (simple driver)"
@@ -261,20 +231,39 @@ config KEYBOARD_LM8323
261 To compile this driver as a module, choose M here: the 231 To compile this driver as a module, choose M here: the
262 module will be called lm8323. 232 module will be called lm8323.
263 233
264config KEYBOARD_OMAP 234config KEYBOARD_LOCOMO
265 tristate "TI OMAP keypad support" 235 tristate "LoCoMo Keyboard Support"
266 depends on (ARCH_OMAP1 || ARCH_OMAP2) 236 depends on SHARP_LOCOMO
267 help 237 help
268 Say Y here if you want to use the OMAP keypad. 238 Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA
269 239
270 To compile this driver as a module, choose M here: the 240 To compile this driver as a module, choose M here: the
271 module will be called omap-keypad. 241 module will be called locomokbd.
242
243config KEYBOARD_MAPLE
244 tristate "Maple bus keyboard"
245 depends on SH_DREAMCAST && MAPLE
246 help
247 Say Y here if you have a Dreamcast console running Linux and have
248 a keyboard attached to its Maple bus.
249
250 To compile this driver as a module, choose M here: the
251 module will be called maple_keyb.
252
253config KEYBOARD_NEWTON
254 tristate "Newton keyboard"
255 select SERIO
256 help
257 Say Y here if you have a Newton keyboard on a serial port.
258
259 To compile this driver as a module, choose M here: the
260 module will be called newtonkbd.
272 261
273config KEYBOARD_PXA27x 262config KEYBOARD_PXA27x
274 tristate "PXA27x/PXA3xx keypad support" 263 tristate "PXA27x/PXA3xx keypad support"
275 depends on PXA27x || PXA3xx 264 depends on PXA27x || PXA3xx
276 help 265 help
277 Enable support for PXA27x/PXA3xx keypad controller 266 Enable support for PXA27x/PXA3xx keypad controller.
278 267
279 To compile this driver as a module, choose M here: the 268 To compile this driver as a module, choose M here: the
280 module will be called pxa27x_keypad. 269 module will be called pxa27x_keypad.
@@ -288,51 +277,38 @@ config KEYBOARD_PXA930_ROTARY
288 To compile this driver as a module, choose M here: the 277 To compile this driver as a module, choose M here: the
289 module will be called pxa930_rotary. 278 module will be called pxa930_rotary.
290 279
291config KEYBOARD_AAED2000 280config KEYBOARD_SPITZ
292 tristate "AAED-2000 keyboard" 281 tristate "Spitz keyboard"
293 depends on MACH_AAED2000 282 depends on PXA_SHARPSL
294 select INPUT_POLLDEV
295 default y 283 default y
296 help 284 help
297 Say Y here to enable the keyboard on the Agilent AAED-2000 285 Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
298 development board. 286 SL-C3000 and Sl-C3100 series of PDAs.
299
300 To compile this driver as a module, choose M here: the
301 module will be called aaed2000_kbd.
302
303config KEYBOARD_GPIO
304 tristate "GPIO Buttons"
305 depends on GENERIC_GPIO
306 help
307 This driver implements support for buttons connected
308 to GPIO pins of various CPUs (and some other chips).
309
310 Say Y here if your device has buttons connected
311 directly to such GPIO pins. Your board-specific
312 setup logic must also provide a platform device,
313 with configuration data saying which GPIOs are used.
314 287
315 To compile this driver as a module, choose M here: the 288 To compile this driver as a module, choose M here: the
316 module will be called gpio-keys. 289 module will be called spitzkbd.
317 290
318config KEYBOARD_MAPLE 291config KEYBOARD_STOWAWAY
319 tristate "Maple bus keyboard" 292 tristate "Stowaway keyboard"
320 depends on SH_DREAMCAST && MAPLE 293 select SERIO
321 help 294 help
322 Say Y here if you have a Dreamcast console running Linux and have 295 Say Y here if you have a Stowaway keyboard on a serial port.
323 a keyboard attached to its Maple bus. 296 Stowaway compatible keyboards like Dicota Input-PDA keyboard
297 are also supported by this driver.
324 298
325 To compile this driver as a module, choose M here: the 299 To compile this driver as a module, choose M here: the
326 module will be called maple_keyb. 300 module will be called stowaway.
327 301
328config KEYBOARD_BFIN 302config KEYBOARD_SUNKBD
329 tristate "Blackfin BF54x keypad support" 303 tristate "Sun Type 4 and Type 5 keyboard"
330 depends on (BF54x && !BF544) 304 select SERIO
331 help 305 help
332 Say Y here if you want to use the BF54x keypad. 306 Say Y here if you want to use a Sun Type 4 or Type 5 keyboard,
307 connected either to the Sun keyboard connector or to an serial
308 (RS-232) port via a simple adapter.
333 309
334 To compile this driver as a module, choose M here: the 310 To compile this driver as a module, choose M here: the
335 module will be called bf54x-keys. 311 module will be called sunkbd.
336 312
337config KEYBOARD_SH_KEYSC 313config KEYBOARD_SH_KEYSC
338 tristate "SuperH KEYSC keypad support" 314 tristate "SuperH KEYSC keypad support"
@@ -344,13 +320,45 @@ config KEYBOARD_SH_KEYSC
344 To compile this driver as a module, choose M here: the 320 To compile this driver as a module, choose M here: the
345 module will be called sh_keysc. 321 module will be called sh_keysc.
346 322
347config KEYBOARD_EP93XX 323config KEYBOARD_OMAP
348 tristate "EP93xx Matrix Keypad support" 324 tristate "TI OMAP keypad support"
349 depends on ARCH_EP93XX 325 depends on (ARCH_OMAP1 || ARCH_OMAP2)
350 help 326 help
351 Say Y here to enable the matrix keypad on the Cirrus EP93XX. 327 Say Y here if you want to use the OMAP keypad.
352 328
353 To compile this driver as a module, choose M here: the 329 To compile this driver as a module, choose M here: the
354 module will be called ep93xx_keypad. 330 module will be called omap-keypad.
331
332config KEYBOARD_TOSA
333 tristate "Tosa keyboard"
334 depends on MACH_TOSA
335 default y
336 help
337 Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
338
339 To compile this driver as a module, choose M here: the
340 module will be called tosakbd.
341
342config KEYBOARD_TOSA_USE_EXT_KEYCODES
343 bool "Tosa keyboard: use extended keycodes"
344 depends on KEYBOARD_TOSA
345 help
346 Say Y here to enable the tosa keyboard driver to generate extended
347 (>= 127) keycodes. Be aware, that they can't be correctly interpreted
348 by either console keyboard driver or by Kdrive keybd driver.
349
350 Say Y only if you know, what you are doing!
351
352config KEYBOARD_XTKBD
353 tristate "XT keyboard"
354 select SERIO
355 help
356 Say Y here if you want to use the old IBM PC/XT keyboard (or
357 compatible) on your system. This is only possible with a
358 parallel port keyboard adapter, you cannot connect it to the
359 keyboard port on a PC that runs Linux.
360
361 To compile this driver as a module, choose M here: the
362 module will be called xtkbd.
355 363
356endif 364endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 156b647a259b..b5b5eae9724f 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,29 +4,30 @@
4 4
5# Each configuration option enables a list of files. 5# Each configuration option enables a list of files.
6 6
7obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o 7obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
8obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
9obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
10obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
11obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o 8obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
12obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o 9obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
13obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o 10obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
14obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o 11obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
15obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
16obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o 12obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
17obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o 13obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
18obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o 14obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
19obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o 15obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
20obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o 16obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
17obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
18obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
19obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
21obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o 20obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o
21obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
22obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
23obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
24obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
22obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o 25obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
23obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o 26obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
24obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o 27obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
25obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
26obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
27obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
28obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
29obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
30obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
31obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o 28obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
32obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o 29obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
30obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
31obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
32obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
33obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index df3f8aa68115..95fe0452dae4 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -895,6 +895,13 @@ static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
895}; 895};
896 896
897/* 897/*
898 * Amilo Pi 3525 key release for Fn+Volume keys not working
899 */
900static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = {
901 0x20, 0xa0, 0x2e, 0xae, 0x30, 0xb0, -1U
902};
903
904/*
898 * Amilo Xi 3650 key release for light touch bar not working 905 * Amilo Xi 3650 key release for light touch bar not working
899 */ 906 */
900static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = { 907static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = {
@@ -902,6 +909,13 @@ static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = {
902}; 909};
903 910
904/* 911/*
912 * Soltech TA12 system with broken key release on volume keys and mute key
913 */
914static unsigned int atkdb_soltech_ta12_forced_release_keys[] = {
915 0xa0, 0xae, 0xb0, -1U
916};
917
918/*
905 * atkbd_set_keycode_table() initializes keyboard's keycode table 919 * atkbd_set_keycode_table() initializes keyboard's keycode table
906 * according to the selected scancode set 920 * according to the selected scancode set
907 */ 921 */
@@ -1568,6 +1582,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1568 .driver_data = atkbd_amilo_pa1510_forced_release_keys, 1582 .driver_data = atkbd_amilo_pa1510_forced_release_keys,
1569 }, 1583 },
1570 { 1584 {
1585 .ident = "Fujitsu Amilo Pi 3525",
1586 .matches = {
1587 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1588 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 3525"),
1589 },
1590 .callback = atkbd_setup_forced_release,
1591 .driver_data = atkbd_amilo_pi3525_forced_release_keys,
1592 },
1593 {
1571 .ident = "Fujitsu Amilo Xi 3650", 1594 .ident = "Fujitsu Amilo Xi 3650",
1572 .matches = { 1595 .matches = {
1573 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 1596 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
@@ -1576,6 +1599,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1576 .callback = atkbd_setup_forced_release, 1599 .callback = atkbd_setup_forced_release,
1577 .driver_data = atkbd_amilo_xi3650_forced_release_keys, 1600 .driver_data = atkbd_amilo_xi3650_forced_release_keys,
1578 }, 1601 },
1602 {
1603 .ident = "Soltech Corporation TA12",
1604 .matches = {
1605 DMI_MATCH(DMI_SYS_VENDOR, "Soltech Corporation"),
1606 DMI_MATCH(DMI_PRODUCT_NAME, "TA12"),
1607 },
1608 .callback = atkbd_setup_forced_release,
1609 .driver_data = atkdb_soltech_ta12_forced_release_keys,
1610 },
1579 { } 1611 { }
1580}; 1612};
1581 1613
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 2157cd7de00c..efed0c9e242e 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -29,7 +29,8 @@
29struct gpio_button_data { 29struct gpio_button_data {
30 struct gpio_keys_button *button; 30 struct gpio_keys_button *button;
31 struct input_dev *input; 31 struct input_dev *input;
32 struct delayed_work work; 32 struct timer_list timer;
33 struct work_struct work;
33}; 34};
34 35
35struct gpio_keys_drvdata { 36struct gpio_keys_drvdata {
@@ -40,7 +41,7 @@ struct gpio_keys_drvdata {
40static void gpio_keys_report_event(struct work_struct *work) 41static void gpio_keys_report_event(struct work_struct *work)
41{ 42{
42 struct gpio_button_data *bdata = 43 struct gpio_button_data *bdata =
43 container_of(work, struct gpio_button_data, work.work); 44 container_of(work, struct gpio_button_data, work);
44 struct gpio_keys_button *button = bdata->button; 45 struct gpio_keys_button *button = bdata->button;
45 struct input_dev *input = bdata->input; 46 struct input_dev *input = bdata->input;
46 unsigned int type = button->type ?: EV_KEY; 47 unsigned int type = button->type ?: EV_KEY;
@@ -50,17 +51,25 @@ static void gpio_keys_report_event(struct work_struct *work)
50 input_sync(input); 51 input_sync(input);
51} 52}
52 53
54static void gpio_keys_timer(unsigned long _data)
55{
56 struct gpio_button_data *data = (struct gpio_button_data *)_data;
57
58 schedule_work(&data->work);
59}
60
53static irqreturn_t gpio_keys_isr(int irq, void *dev_id) 61static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
54{ 62{
55 struct gpio_button_data *bdata = dev_id; 63 struct gpio_button_data *bdata = dev_id;
56 struct gpio_keys_button *button = bdata->button; 64 struct gpio_keys_button *button = bdata->button;
57 unsigned long delay;
58 65
59 BUG_ON(irq != gpio_to_irq(button->gpio)); 66 BUG_ON(irq != gpio_to_irq(button->gpio));
60 67
61 delay = button->debounce_interval ? 68 if (button->debounce_interval)
62 msecs_to_jiffies(button->debounce_interval) : 0; 69 mod_timer(&bdata->timer,
63 schedule_delayed_work(&bdata->work, delay); 70 jiffies + msecs_to_jiffies(button->debounce_interval));
71 else
72 schedule_work(&bdata->work);
64 73
65 return IRQ_HANDLED; 74 return IRQ_HANDLED;
66} 75}
@@ -107,7 +116,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
107 116
108 bdata->input = input; 117 bdata->input = input;
109 bdata->button = button; 118 bdata->button = button;
110 INIT_DELAYED_WORK(&bdata->work, gpio_keys_report_event); 119 setup_timer(&bdata->timer,
120 gpio_keys_timer, (unsigned long)bdata);
121 INIT_WORK(&bdata->work, gpio_keys_report_event);
111 122
112 error = gpio_request(button->gpio, button->desc ?: "gpio_keys"); 123 error = gpio_request(button->gpio, button->desc ?: "gpio_keys");
113 if (error < 0) { 124 if (error < 0) {
@@ -166,7 +177,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
166 fail2: 177 fail2:
167 while (--i >= 0) { 178 while (--i >= 0) {
168 free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]); 179 free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]);
169 cancel_delayed_work_sync(&ddata->data[i].work); 180 if (pdata->buttons[i].debounce_interval)
181 del_timer_sync(&ddata->data[i].timer);
182 cancel_work_sync(&ddata->data[i].work);
170 gpio_free(pdata->buttons[i].gpio); 183 gpio_free(pdata->buttons[i].gpio);
171 } 184 }
172 185
@@ -190,7 +203,9 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
190 for (i = 0; i < pdata->nbuttons; i++) { 203 for (i = 0; i < pdata->nbuttons; i++) {
191 int irq = gpio_to_irq(pdata->buttons[i].gpio); 204 int irq = gpio_to_irq(pdata->buttons[i].gpio);
192 free_irq(irq, &ddata->data[i]); 205 free_irq(irq, &ddata->data[i]);
193 cancel_delayed_work_sync(&ddata->data[i].work); 206 if (pdata->buttons[i].debounce_interval)
207 del_timer_sync(&ddata->data[i].timer);
208 cancel_work_sync(&ddata->data[i].work);
194 gpio_free(pdata->buttons[i].gpio); 209 gpio_free(pdata->buttons[i].gpio);
195 } 210 }
196 211
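
The gpio_keys rework above splits the old delayed work into an explicit timer plus a plain work item: the hard-IRQ handler only arms (or bypasses) the debounce timer, the timer callback merely queues the work, and the GPIO read plus input report still run in process context. The resulting flow, pieced together from the hunks above (the report step is the pre-existing gpio_keys_report_event() body, unchanged by this patch):

	/* hard irq: arm the debounce timer, or queue the work directly if no debounce */
	if (button->debounce_interval)
		mod_timer(&bdata->timer,
			  jiffies + msecs_to_jiffies(button->debounce_interval));
	else
		schedule_work(&bdata->work);

	/* timer callback (softirq): hand off to the workqueue */
	schedule_work(&data->work);

	/* work handler (process context): sample the GPIO, report, then input_sync(input) */
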
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
new file mode 100644
index 000000000000..541b981ff075
--- /dev/null
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -0,0 +1,453 @@
1/*
2 * GPIO driven matrix keyboard driver
3 *
4 * Copyright (c) 2008 Marek Vasut <marek.vasut@gmail.com>
5 *
6 * Based on corgikbd.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/types.h>
15#include <linux/delay.h>
16#include <linux/platform_device.h>
17#include <linux/init.h>
18#include <linux/input.h>
19#include <linux/irq.h>
20#include <linux/interrupt.h>
21#include <linux/jiffies.h>
22#include <linux/module.h>
23#include <linux/gpio.h>
24#include <linux/input/matrix_keypad.h>
25
26struct matrix_keypad {
27 const struct matrix_keypad_platform_data *pdata;
28 struct input_dev *input_dev;
29 unsigned short *keycodes;
30 unsigned int row_shift;
31
32 uint32_t last_key_state[MATRIX_MAX_COLS];
33 struct delayed_work work;
34 bool scan_pending;
35 bool stopped;
36 spinlock_t lock;
37};
38
39/*
40 * NOTE: normally the GPIO has to be put into HiZ when de-activated to cause
 41 * minimal side effects when scanning other columns; here it is configured to
 42 * be an input, and it should work on most platforms.
43 */
44static void __activate_col(const struct matrix_keypad_platform_data *pdata,
45 int col, bool on)
46{
47 bool level_on = !pdata->active_low;
48
49 if (on) {
50 gpio_direction_output(pdata->col_gpios[col], level_on);
51 } else {
52 gpio_set_value_cansleep(pdata->col_gpios[col], !level_on);
53 gpio_direction_input(pdata->col_gpios[col]);
54 }
55}
56
57static void activate_col(const struct matrix_keypad_platform_data *pdata,
58 int col, bool on)
59{
60 __activate_col(pdata, col, on);
61
62 if (on && pdata->col_scan_delay_us)
63 udelay(pdata->col_scan_delay_us);
64}
65
66static void activate_all_cols(const struct matrix_keypad_platform_data *pdata,
67 bool on)
68{
69 int col;
70
71 for (col = 0; col < pdata->num_col_gpios; col++)
72 __activate_col(pdata, col, on);
73}
74
75static bool row_asserted(const struct matrix_keypad_platform_data *pdata,
76 int row)
77{
78 return gpio_get_value_cansleep(pdata->row_gpios[row]) ?
79 !pdata->active_low : pdata->active_low;
80}
81
82static void enable_row_irqs(struct matrix_keypad *keypad)
83{
84 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
85 int i;
86
87 for (i = 0; i < pdata->num_row_gpios; i++)
88 enable_irq(gpio_to_irq(pdata->row_gpios[i]));
89}
90
91static void disable_row_irqs(struct matrix_keypad *keypad)
92{
93 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
94 int i;
95
96 for (i = 0; i < pdata->num_row_gpios; i++)
97 disable_irq_nosync(gpio_to_irq(pdata->row_gpios[i]));
98}
99
100/*
 101 * This gets the keys from the keyboard and reports them to the input subsystem
102 */
103static void matrix_keypad_scan(struct work_struct *work)
104{
105 struct matrix_keypad *keypad =
106 container_of(work, struct matrix_keypad, work.work);
107 struct input_dev *input_dev = keypad->input_dev;
108 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
109 uint32_t new_state[MATRIX_MAX_COLS];
110 int row, col, code;
111
112 /* de-activate all columns for scanning */
113 activate_all_cols(pdata, false);
114
115 memset(new_state, 0, sizeof(new_state));
116
117 /* assert each column and read the row status out */
118 for (col = 0; col < pdata->num_col_gpios; col++) {
119
120 activate_col(pdata, col, true);
121
122 for (row = 0; row < pdata->num_row_gpios; row++)
123 new_state[col] |=
124 row_asserted(pdata, row) ? (1 << row) : 0;
125
126 activate_col(pdata, col, false);
127 }
128
129 for (col = 0; col < pdata->num_col_gpios; col++) {
130 uint32_t bits_changed;
131
132 bits_changed = keypad->last_key_state[col] ^ new_state[col];
133 if (bits_changed == 0)
134 continue;
135
136 for (row = 0; row < pdata->num_row_gpios; row++) {
137 if ((bits_changed & (1 << row)) == 0)
138 continue;
139
140 code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
141 input_event(input_dev, EV_MSC, MSC_SCAN, code);
142 input_report_key(input_dev,
143 keypad->keycodes[code],
144 new_state[col] & (1 << row));
145 }
146 }
147 input_sync(input_dev);
148
149 memcpy(keypad->last_key_state, new_state, sizeof(new_state));
150
151 activate_all_cols(pdata, true);
152
153 /* Enable IRQs again */
154 spin_lock_irq(&keypad->lock);
155 keypad->scan_pending = false;
156 enable_row_irqs(keypad);
157 spin_unlock_irq(&keypad->lock);
158}
159
160static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
161{
162 struct matrix_keypad *keypad = id;
163 unsigned long flags;
164
165 spin_lock_irqsave(&keypad->lock, flags);
166
167 /*
 168 * See if another IRQ has beaten us to it and scheduled the
169 * scan already. In that case we should not try to
170 * disable IRQs again.
171 */
172 if (unlikely(keypad->scan_pending || keypad->stopped))
173 goto out;
174
175 disable_row_irqs(keypad);
176 keypad->scan_pending = true;
177 schedule_delayed_work(&keypad->work,
178 msecs_to_jiffies(keypad->pdata->debounce_ms));
179
180out:
181 spin_unlock_irqrestore(&keypad->lock, flags);
182 return IRQ_HANDLED;
183}
184
185static int matrix_keypad_start(struct input_dev *dev)
186{
187 struct matrix_keypad *keypad = input_get_drvdata(dev);
188
189 keypad->stopped = false;
190 mb();
191
192 /*
193 * Schedule an immediate key scan to capture current key state;
 194 * columns will be activated and IRQs will be enabled after the scan.
195 */
196 schedule_delayed_work(&keypad->work, 0);
197
198 return 0;
199}
200
201static void matrix_keypad_stop(struct input_dev *dev)
202{
203 struct matrix_keypad *keypad = input_get_drvdata(dev);
204
205 keypad->stopped = true;
206 mb();
207 flush_work(&keypad->work.work);
208 /*
209 * matrix_keypad_scan() will leave IRQs enabled;
210 * we should disable them now.
211 */
212 disable_row_irqs(keypad);
213}
214
215#ifdef CONFIG_PM
216static int matrix_keypad_suspend(struct platform_device *pdev, pm_message_t state)
217{
218 struct matrix_keypad *keypad = platform_get_drvdata(pdev);
219 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
220 int i;
221
222 matrix_keypad_stop(keypad->input_dev);
223
224 if (device_may_wakeup(&pdev->dev))
225 for (i = 0; i < pdata->num_row_gpios; i++)
226 enable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
227
228 return 0;
229}
230
231static int matrix_keypad_resume(struct platform_device *pdev)
232{
233 struct matrix_keypad *keypad = platform_get_drvdata(pdev);
234 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
235 int i;
236
237 if (device_may_wakeup(&pdev->dev))
238 for (i = 0; i < pdata->num_row_gpios; i++)
239 disable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
240
241 matrix_keypad_start(keypad->input_dev);
242
243 return 0;
244}
245#else
246#define matrix_keypad_suspend NULL
247#define matrix_keypad_resume NULL
248#endif
249
250static int __devinit init_matrix_gpio(struct platform_device *pdev,
251 struct matrix_keypad *keypad)
252{
253 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
254 int i, err = -EINVAL;
255
 256 /* initialize strobe lines as outputs, activated */
257 for (i = 0; i < pdata->num_col_gpios; i++) {
258 err = gpio_request(pdata->col_gpios[i], "matrix_kbd_col");
259 if (err) {
260 dev_err(&pdev->dev,
261 "failed to request GPIO%d for COL%d\n",
262 pdata->col_gpios[i], i);
263 goto err_free_cols;
264 }
265
266 gpio_direction_output(pdata->col_gpios[i], !pdata->active_low);
267 }
268
269 for (i = 0; i < pdata->num_row_gpios; i++) {
270 err = gpio_request(pdata->row_gpios[i], "matrix_kbd_row");
271 if (err) {
272 dev_err(&pdev->dev,
273 "failed to request GPIO%d for ROW%d\n",
274 pdata->row_gpios[i], i);
275 goto err_free_rows;
276 }
277
278 gpio_direction_input(pdata->row_gpios[i]);
279 }
280
281 for (i = 0; i < pdata->num_row_gpios; i++) {
282 err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
283 matrix_keypad_interrupt,
284 IRQF_DISABLED |
285 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
286 "matrix-keypad", keypad);
287 if (err) {
288 dev_err(&pdev->dev,
289 "Unable to acquire interrupt for GPIO line %i\n",
290 pdata->row_gpios[i]);
291 goto err_free_irqs;
292 }
293 }
294
295 /* initialized as disabled - enabled by input->open */
296 disable_row_irqs(keypad);
297 return 0;
298
299err_free_irqs:
300 while (--i >= 0)
301 free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
302 i = pdata->num_row_gpios;
303err_free_rows:
304 while (--i >= 0)
305 gpio_free(pdata->row_gpios[i]);
306 i = pdata->num_col_gpios;
307err_free_cols:
308 while (--i >= 0)
309 gpio_free(pdata->col_gpios[i]);
310
311 return err;
312}
313
314static int __devinit matrix_keypad_probe(struct platform_device *pdev)
315{
316 const struct matrix_keypad_platform_data *pdata;
317 const struct matrix_keymap_data *keymap_data;
318 struct matrix_keypad *keypad;
319 struct input_dev *input_dev;
320 unsigned short *keycodes;
321 unsigned int row_shift;
322 int i;
323 int err;
324
325 pdata = pdev->dev.platform_data;
326 if (!pdata) {
327 dev_err(&pdev->dev, "no platform data defined\n");
328 return -EINVAL;
329 }
330
331 keymap_data = pdata->keymap_data;
332 if (!keymap_data) {
333 dev_err(&pdev->dev, "no keymap data defined\n");
334 return -EINVAL;
335 }
336
337 row_shift = get_count_order(pdata->num_col_gpios);
338
339 keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
340 keycodes = kzalloc((pdata->num_row_gpios << row_shift) *
341 sizeof(*keycodes),
342 GFP_KERNEL);
343 input_dev = input_allocate_device();
344 if (!keypad || !keycodes || !input_dev) {
345 err = -ENOMEM;
346 goto err_free_mem;
347 }
348
349 keypad->input_dev = input_dev;
350 keypad->pdata = pdata;
351 keypad->keycodes = keycodes;
352 keypad->row_shift = row_shift;
353 keypad->stopped = true;
354 INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
355 spin_lock_init(&keypad->lock);
356
357 input_dev->name = pdev->name;
358 input_dev->id.bustype = BUS_HOST;
359 input_dev->dev.parent = &pdev->dev;
360 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
361 input_dev->open = matrix_keypad_start;
362 input_dev->close = matrix_keypad_stop;
363
364 input_dev->keycode = keycodes;
365 input_dev->keycodesize = sizeof(*keycodes);
366 input_dev->keycodemax = pdata->num_row_gpios << keypad->row_shift;
367
368 for (i = 0; i < keymap_data->keymap_size; i++) {
369 unsigned int key = keymap_data->keymap[i];
370 unsigned int row = KEY_ROW(key);
371 unsigned int col = KEY_COL(key);
372 unsigned short code = KEY_VAL(key);
373
374 keycodes[MATRIX_SCAN_CODE(row, col, row_shift)] = code;
375 __set_bit(code, input_dev->keybit);
376 }
377 __clear_bit(KEY_RESERVED, input_dev->keybit);
378
379 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
380 input_set_drvdata(input_dev, keypad);
381
382 err = init_matrix_gpio(pdev, keypad);
383 if (err)
384 goto err_free_mem;
385
386 err = input_register_device(keypad->input_dev);
387 if (err)
388 goto err_free_mem;
389
390 device_init_wakeup(&pdev->dev, pdata->wakeup);
391 platform_set_drvdata(pdev, keypad);
392
393 return 0;
394
395err_free_mem:
396 input_free_device(input_dev);
397 kfree(keycodes);
398 kfree(keypad);
399 return err;
400}
401
402static int __devexit matrix_keypad_remove(struct platform_device *pdev)
403{
404 struct matrix_keypad *keypad = platform_get_drvdata(pdev);
405 const struct matrix_keypad_platform_data *pdata = keypad->pdata;
406 int i;
407
408 device_init_wakeup(&pdev->dev, 0);
409
410 for (i = 0; i < pdata->num_row_gpios; i++) {
411 free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
412 gpio_free(pdata->row_gpios[i]);
413 }
414
415 for (i = 0; i < pdata->num_col_gpios; i++)
416 gpio_free(pdata->col_gpios[i]);
417
418 input_unregister_device(keypad->input_dev);
419 platform_set_drvdata(pdev, NULL);
420 kfree(keypad->keycodes);
421 kfree(keypad);
422
423 return 0;
424}
425
426static struct platform_driver matrix_keypad_driver = {
427 .probe = matrix_keypad_probe,
428 .remove = __devexit_p(matrix_keypad_remove),
429 .suspend = matrix_keypad_suspend,
430 .resume = matrix_keypad_resume,
431 .driver = {
432 .name = "matrix-keypad",
433 .owner = THIS_MODULE,
434 },
435};
436
437static int __init matrix_keypad_init(void)
438{
439 return platform_driver_register(&matrix_keypad_driver);
440}
441
442static void __exit matrix_keypad_exit(void)
443{
444 platform_driver_unregister(&matrix_keypad_driver);
445}
446
447module_init(matrix_keypad_init);
448module_exit(matrix_keypad_exit);
449
450MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
451MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver");
452MODULE_LICENSE("GPL v2");
453MODULE_ALIAS("platform:matrix-keypad");
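
For context, this driver is wired up entirely through platform data: board code supplies the row/column GPIO arrays and a keymap, then registers a "matrix-keypad" platform device that the probe routine above picks up. The sketch below is a hypothetical minimal board snippet, not part of this merge; the GPIO numbers and keycodes are invented, the KEY() helper and the <linux/input/matrix_keypad.h> header location are assumptions, and only fields the code above actually dereferences are filled in.

/* Hypothetical board-support sketch for the matrix-keypad driver above. */
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>	/* assumed location of the pdata types */
#include <linux/kernel.h>
#include <linux/platform_device.h>

static const unsigned int demo_row_gpios[] = { 100, 101 };	/* invented GPIOs */
static const unsigned int demo_col_gpios[] = { 102, 103 };

static const uint32_t demo_keymap[] = {
	KEY(0, 0, KEY_A),	/* row 0, column 0 reports KEY_A */
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_C),
	KEY(1, 1, KEY_D),
};

static const struct matrix_keymap_data demo_keymap_data = {
	.keymap		= demo_keymap,
	.keymap_size	= ARRAY_SIZE(demo_keymap),
};

static struct matrix_keypad_platform_data demo_keypad_pdata = {
	.keymap_data	= &demo_keymap_data,
	.row_gpios	= demo_row_gpios,
	.num_row_gpios	= ARRAY_SIZE(demo_row_gpios),
	.col_gpios	= demo_col_gpios,
	.num_col_gpios	= ARRAY_SIZE(demo_col_gpios),
	.active_low	= 1,	/* rows/columns are active-low on this board */
	.wakeup		= 1,	/* let pressed keys wake the system */
};

static struct platform_device demo_keypad_device = {
	.name			= "matrix-keypad",	/* matches the driver name above */
	.id			= -1,
	.dev.platform_data	= &demo_keypad_pdata,
};

With wakeup set, matrix_keypad_suspend() above arms every row interrupt as a wake source via enable_irq_wake().
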
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 2adf9cb265da..d114d3a9e1e9 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cobalt button interface driver. 2 * Cobalt button interface driver.
3 * 3 *
4 * Copyright (C) 2007-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -148,7 +148,7 @@ static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
148 return 0; 148 return 0;
149} 149}
150 150
151MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 151MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
152MODULE_DESCRIPTION("Cobalt button interface driver"); 152MODULE_DESCRIPTION("Cobalt button interface driver");
153MODULE_LICENSE("GPL"); 153MODULE_LICENSE("GPL");
154/* work with hotplug and coldplug */ 154/* work with hotplug and coldplug */
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 6d67af5387ad..21cb755a54fb 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -114,7 +114,7 @@ static int __devexit pcspkr_remove(struct platform_device *dev)
114 return 0; 114 return 0;
115} 115}
116 116
117static int pcspkr_suspend(struct platform_device *dev, pm_message_t state) 117static int pcspkr_suspend(struct device *dev)
118{ 118{
119 pcspkr_event(NULL, EV_SND, SND_BELL, 0); 119 pcspkr_event(NULL, EV_SND, SND_BELL, 0);
120 120
@@ -127,14 +127,18 @@ static void pcspkr_shutdown(struct platform_device *dev)
127 pcspkr_event(NULL, EV_SND, SND_BELL, 0); 127 pcspkr_event(NULL, EV_SND, SND_BELL, 0);
128} 128}
129 129
130static struct dev_pm_ops pcspkr_pm_ops = {
131 .suspend = pcspkr_suspend,
132};
133
130static struct platform_driver pcspkr_platform_driver = { 134static struct platform_driver pcspkr_platform_driver = {
131 .driver = { 135 .driver = {
132 .name = "pcspkr", 136 .name = "pcspkr",
133 .owner = THIS_MODULE, 137 .owner = THIS_MODULE,
138 .pm = &pcspkr_pm_ops,
134 }, 139 },
135 .probe = pcspkr_probe, 140 .probe = pcspkr_probe,
136 .remove = __devexit_p(pcspkr_remove), 141 .remove = __devexit_p(pcspkr_remove),
137 .suspend = pcspkr_suspend,
138 .shutdown = pcspkr_shutdown, 142 .shutdown = pcspkr_shutdown,
139}; 143};
140 144
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 7c8957dd22c0..27ee976eb54c 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -611,6 +611,20 @@ static struct key_entry keymap_wistron_generic[] __initdata = {
611 { KE_END, 0 } 611 { KE_END, 0 }
612}; 612};
613 613
614static struct key_entry keymap_prestigio[] __initdata = {
615 { KE_KEY, 0x11, {KEY_PROG1} },
616 { KE_KEY, 0x12, {KEY_PROG2} },
617 { KE_WIFI, 0x30 },
618 { KE_KEY, 0x22, {KEY_REWIND} },
619 { KE_KEY, 0x23, {KEY_FORWARD} },
620 { KE_KEY, 0x24, {KEY_PLAYPAUSE} },
621 { KE_KEY, 0x25, {KEY_STOPCD} },
622 { KE_KEY, 0x31, {KEY_MAIL} },
623 { KE_KEY, 0x36, {KEY_WWW} },
624 { KE_END, 0 }
625};
626
627
614/* 628/*
615 * If your machine is not here (which is currently rather likely), please send 629 * If your machine is not here (which is currently rather likely), please send
616 * a list of buttons and their key codes (reported when loading this module 630 * a list of buttons and their key codes (reported when loading this module
@@ -646,6 +660,15 @@ static struct dmi_system_id dmi_ids[] __initdata = {
646 }, 660 },
647 { 661 {
648 .callback = dmi_matched, 662 .callback = dmi_matched,
663 .ident = "Maxdata Pro 7000 DX",
664 .matches = {
665 DMI_MATCH(DMI_SYS_VENDOR, "MAXDATA"),
666 DMI_MATCH(DMI_PRODUCT_NAME, "Pro 7000"),
667 },
668 .driver_data = keymap_fs_amilo_pro_v2000
669 },
670 {
671 .callback = dmi_matched,
649 .ident = "Fujitsu N3510", 672 .ident = "Fujitsu N3510",
650 .matches = { 673 .matches = {
651 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 674 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -962,6 +985,8 @@ static int __init select_keymap(void)
962 if (keymap_name != NULL) { 985 if (keymap_name != NULL) {
963 if (strcmp (keymap_name, "1557/MS2141") == 0) 986 if (strcmp (keymap_name, "1557/MS2141") == 0)
964 keymap = keymap_wistron_ms2141; 987 keymap = keymap_wistron_ms2141;
988 else if (strcmp (keymap_name, "prestigio") == 0)
989 keymap = keymap_prestigio;
965 else if (strcmp (keymap_name, "generic") == 0) 990 else if (strcmp (keymap_name, "generic") == 0)
966 keymap = keymap_wistron_generic; 991 keymap = keymap_wistron_generic;
967 else { 992 else {
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 5e5eb88d8d1e..7b6ce178f1b6 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -46,7 +46,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
46 input_sync(input); 46 input_sync(input);
47} 47}
48 48
49static int __init gpio_mouse_probe(struct platform_device *pdev) 49static int __devinit gpio_mouse_probe(struct platform_device *pdev)
50{ 50{
51 struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data; 51 struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data;
52 struct input_polled_dev *input_poll; 52 struct input_polled_dev *input_poll;
@@ -170,10 +170,8 @@ static int __devexit gpio_mouse_remove(struct platform_device *pdev)
170 return 0; 170 return 0;
171} 171}
172 172
173/* work with hotplug and coldplug */
174MODULE_ALIAS("platform:gpio_mouse");
175
176static struct platform_driver gpio_mouse_device_driver = { 173static struct platform_driver gpio_mouse_device_driver = {
174 .probe = gpio_mouse_probe,
177 .remove = __devexit_p(gpio_mouse_remove), 175 .remove = __devexit_p(gpio_mouse_remove),
178 .driver = { 176 .driver = {
179 .name = "gpio_mouse", 177 .name = "gpio_mouse",
@@ -183,8 +181,7 @@ static struct platform_driver gpio_mouse_device_driver = {
183 181
184static int __init gpio_mouse_init(void) 182static int __init gpio_mouse_init(void)
185{ 183{
186 return platform_driver_probe(&gpio_mouse_device_driver, 184 return platform_driver_register(&gpio_mouse_device_driver);
187 gpio_mouse_probe);
188} 185}
189module_init(gpio_mouse_init); 186module_init(gpio_mouse_init);
190 187
@@ -197,3 +194,5 @@ module_exit(gpio_mouse_exit);
197MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); 194MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
198MODULE_DESCRIPTION("GPIO mouse driver"); 195MODULE_DESCRIPTION("GPIO mouse driver");
199MODULE_LICENSE("GPL"); 196MODULE_LICENSE("GPL");
197MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
198
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
index b587e2d576ac..820e51673b26 100644
--- a/drivers/input/serio/hp_sdc_mlc.c
+++ b/drivers/input/serio/hp_sdc_mlc.c
@@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
296 priv->tseq[3] = 0; 296 priv->tseq[3] = 0;
297 if (mlc->opacket & HIL_CTRL_APE) { 297 if (mlc->opacket & HIL_CTRL_APE) {
298 priv->tseq[3] |= HP_SDC_LPC_APE_IPF; 298 priv->tseq[3] |= HP_SDC_LPC_APE_IPF;
299 down_trylock(&mlc->csem); 299 BUG_ON(down_trylock(&mlc->csem));
300 } 300 }
301 enqueue: 301 enqueue:
302 hp_sdc_enqueue_transaction(&priv->trans); 302 hp_sdc_enqueue_transaction(&priv->trans);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index fb8a3cd3ffd0..ae04d8a494e5 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -78,6 +78,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = {
78 }, 78 },
79 }, 79 },
80 { 80 {
81 .ident = "ASUS G1S",
82 .matches = {
83 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
84 DMI_MATCH(DMI_BOARD_NAME, "G1S"),
85 DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
86 },
87 },
88 {
81 /* AUX LOOP command does not raise AUX IRQ */ 89 /* AUX LOOP command does not raise AUX IRQ */
82 .ident = "ASUS P65UP5", 90 .ident = "ASUS P65UP5",
83 .matches = { 91 .matches = {
@@ -392,6 +400,34 @@ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
392 DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), 400 DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
393 }, 401 },
394 }, 402 },
403 {
404 .ident = "Acer Aspire One 150",
405 .matches = {
406 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
407 DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
408 },
409 },
410 {
411 .ident = "Advent 4211",
412 .matches = {
413 DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
414 DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
415 },
416 },
417 {
418 .ident = "Medion Akoya Mini E1210",
419 .matches = {
420 DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
421 DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
422 },
423 },
424 {
425 .ident = "Mivvy M310",
426 .matches = {
427 DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
428 DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
429 },
430 },
395 { } 431 { }
396}; 432};
397 433
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index f919bf57293c..582245c497eb 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -934,10 +934,11 @@ static bool i8042_suspended;
934 934
935static int i8042_suspend(struct platform_device *dev, pm_message_t state) 935static int i8042_suspend(struct platform_device *dev, pm_message_t state)
936{ 936{
937 if (!i8042_suspended && state.event == PM_EVENT_SUSPEND) { 937 if (!i8042_suspended && state.event == PM_EVENT_SUSPEND)
938 i8042_controller_reset(); 938 i8042_controller_reset();
939 i8042_suspended = true; 939
940 } 940 i8042_suspended = state.event == PM_EVENT_SUSPEND ||
941 state.event == PM_EVENT_FREEZE;
941 942
942 return 0; 943 return 0;
943} 944}
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index fb17573f8f2d..d66f4944f2a0 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -935,10 +935,11 @@ static int serio_suspend(struct device *dev, pm_message_t state)
935{ 935{
936 struct serio *serio = to_serio_port(dev); 936 struct serio *serio = to_serio_port(dev);
937 937
938 if (!serio->suspended && state.event == PM_EVENT_SUSPEND) { 938 if (!serio->suspended && state.event == PM_EVENT_SUSPEND)
939 serio_cleanup(serio); 939 serio_cleanup(serio);
940 serio->suspended = true; 940
941 } 941 serio->suspended = state.event == PM_EVENT_SUSPEND ||
942 state.event == PM_EVENT_FREEZE;
942 943
943 return 0; 944 return 0;
944} 945}
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 38bf86384aeb..c896d6a21b7e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -384,6 +384,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
384 wacom_report_key(wcombo, BTN_STYLUS2, 0); 384 wacom_report_key(wcombo, BTN_STYLUS2, 0);
385 wacom_report_key(wcombo, BTN_TOUCH, 0); 385 wacom_report_key(wcombo, BTN_TOUCH, 0);
386 wacom_report_abs(wcombo, ABS_WHEEL, 0); 386 wacom_report_abs(wcombo, ABS_WHEEL, 0);
387 if (wacom->features->type >= INTUOS3S)
388 wacom_report_abs(wcombo, ABS_Z, 0);
387 } 389 }
388 wacom_report_key(wcombo, wacom->tool[idx], 0); 390 wacom_report_key(wcombo, wacom->tool[idx], 0);
389 wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */ 391 wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
@@ -836,6 +838,7 @@ static struct wacom_features wacom_features[] = {
836 { "Wacom DTU710", 8, 34080, 27660, 511, 0, PL }, 838 { "Wacom DTU710", 8, 34080, 27660, 511, 0, PL },
837 { "Wacom DTF521", 8, 6282, 4762, 511, 0, PL }, 839 { "Wacom DTF521", 8, 6282, 4762, 511, 0, PL },
838 { "Wacom DTF720", 8, 6858, 5506, 511, 0, PL }, 840 { "Wacom DTF720", 8, 6858, 5506, 511, 0, PL },
841 { "Wacom DTF720a", 8, 6858, 5506, 511, 0, PL },
839 { "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU }, 842 { "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU },
840 { "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS }, 843 { "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS },
841 { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS }, 844 { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
@@ -897,8 +900,9 @@ static struct usb_device_id wacom_ids[] = {
897 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x37) }, 900 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x37) },
898 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x38) }, 901 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x38) },
899 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x39) }, 902 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x39) },
900 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) },
901 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC4) }, 903 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC4) },
904 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) },
905 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC2) },
902 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x03) }, 906 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x03) },
903 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x41) }, 907 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x41) },
904 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x42) }, 908 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x42) },
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index ec5169604a6a..2d91049571a4 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -294,32 +294,33 @@ struct reply_t gigaset_tab_cid[] =
294 {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}}, 294 {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
295 {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, 295 {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
296 {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, 296 {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
297 {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */ 297 {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"},
298 {RSP_OK, 607,607, -1, 608,-1}, 298 {RSP_OK, 607,607, -1, 608,-1},
299 //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE
300 {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}}, 299 {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
301 {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}}, 300 {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},
302 301
303 {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}},
304 {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}},
305 {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}},
306 {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, 302 {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
307 {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, 303 {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
308 304
309 /* dialing */ 305 /* optional dialing responses */
310 {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}}, 306 {EV_BC_OPEN, 650,650, -1, 651,-1},
311 {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}}, 307 {RSP_ZVLS, 608,651, 17, -1,-1, {ACT_DEBUG}},
312 {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */ 308 {RSP_ZCTP, 609,651, -1, -1,-1, {ACT_DEBUG}},
313 309 {RSP_ZCPN, 609,651, -1, -1,-1, {ACT_DEBUG}},
314 /* connection established */ 310 {RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}},
315 {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 311
316 {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 312 /* connect */
317 313 {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
318 {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout 314 {RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
315 ACT_NOTIFY_BC_UP}},
316 {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
317 {RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
318 ACT_NOTIFY_BC_UP}},
319 {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}},
319 320
320 /* remote hangup */ 321 /* remote hangup */
321 {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, 322 {RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
322 {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 323 {RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
323 {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 324 {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
324 325
325 /* hangup */ 326 /* hangup */
@@ -358,7 +359,8 @@ struct reply_t gigaset_tab_cid[] =
358 {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}}, 359 {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
359 {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}}, 360 {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},
360 361
361 {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}}, 362 {EV_BC_OPEN, 750,750, -1, 751,-1},
363 {EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}},
362 364
363 /* B channel closed (general case) */ 365 /* B channel closed (general case) */
364 {EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME 366 {EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME
@@ -876,12 +878,6 @@ static void bchannel_down(struct bc_state *bcs)
876 878
877static void bchannel_up(struct bc_state *bcs) 879static void bchannel_up(struct bc_state *bcs)
878{ 880{
879 if (!(bcs->chstate & CHS_D_UP)) {
880 dev_notice(bcs->cs->dev, "%s: D channel not up\n", __func__);
881 bcs->chstate |= CHS_D_UP;
882 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
883 }
884
885 if (bcs->chstate & CHS_B_UP) { 881 if (bcs->chstate & CHS_B_UP) {
886 dev_notice(bcs->cs->dev, "%s: B channel already up\n", 882 dev_notice(bcs->cs->dev, "%s: B channel already up\n",
887 __func__); 883 __func__);
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 1ebfcab74662..8ff7e35c7069 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -408,6 +408,8 @@ static int if_write_room(struct tty_struct *tty)
408 return retval; 408 return retval;
409} 409}
410 410
411/* FIXME: This function does not have error returns */
412
411static int if_chars_in_buffer(struct tty_struct *tty) 413static int if_chars_in_buffer(struct tty_struct *tty)
412{ 414{
413 struct cardstate *cs; 415 struct cardstate *cs;
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index db3a1e4cd489..bed38fcc432b 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -174,12 +174,6 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
174 pr_err("invalid size %d\n", size); 174 pr_err("invalid size %d\n", size);
175 return -EINVAL; 175 return -EINVAL;
176 } 176 }
177 src = iwb->read;
178 if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
179 (read < src && limit >= src))) {
180 pr_err("isoc write buffer frame reservation violated\n");
181 return -EFAULT;
182 }
183#endif 177#endif
184 178
185 if (read < write) { 179 if (read < write) {
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 8df889b0c1a9..9de54202c90c 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -37,7 +37,6 @@
37#include <linux/kernel_stat.h> 37#include <linux/kernel_stat.h>
38#include <linux/usb.h> 38#include <linux/usb.h>
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/smp_lock.h>
41#include <linux/sched.h> 40#include <linux/sched.h>
42#include <linux/moduleparam.h> 41#include <linux/moduleparam.h>
43#include "hisax.h" 42#include "hisax.h"
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index b4d4522e5071..2881a66c1aa9 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/isdn.h> 14#include <linux/isdn.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/smp_lock.h>
16#include "isdn_common.h" 17#include "isdn_common.h"
17#include "isdn_tty.h" 18#include "isdn_tty.h"
18#ifdef CONFIG_ISDN_AUDIO 19#ifdef CONFIG_ISDN_AUDIO
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 990e6a7e6674..7e5f30dbc0a0 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -731,10 +731,10 @@ l1oip_socket_thread(void *data)
731 while (!signal_pending(current)) { 731 while (!signal_pending(current)) {
732 struct kvec iov = { 732 struct kvec iov = {
733 .iov_base = recvbuf, 733 .iov_base = recvbuf,
734 .iov_len = sizeof(recvbuf), 734 .iov_len = recvbuf_size,
735 }; 735 };
736 recvlen = kernel_recvmsg(socket, &msg, &iov, 1, 736 recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
737 sizeof(recvbuf), 0); 737 recvbuf_size, 0);
738 if (recvlen > 0) { 738 if (recvlen > 0) {
739 l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); 739 l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
740 } else { 740 } else {
@@ -1480,7 +1480,7 @@ l1oip_init(void)
1480 return -ENOMEM; 1480 return -ENOMEM;
1481 1481
1482 l1oip_cnt = 0; 1482 l1oip_cnt = 0;
1483 while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) { 1483 while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) {
1484 switch (type[l1oip_cnt] & 0xff) { 1484 switch (type[l1oip_cnt] & 0xff) {
1485 case 1: 1485 case 1:
1486 pri = 0; 1486 pri = 0;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index e2f45019ebf0..3e1532a180ff 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/mISDNif.h> 18#include <linux/mISDNif.h>
19#include <linux/kthread.h> 19#include <linux/kthread.h>
20#include <linux/smp_lock.h>
20#include "core.h" 21#include "core.h"
21 22
22static u_int *debug; 23static u_int *debug;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9b60b6b684d9..7c8e7122aaa9 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -75,6 +75,7 @@ config LEDS_ALIX2
75 depends on LEDS_CLASS && X86 && EXPERIMENTAL 75 depends on LEDS_CLASS && X86 && EXPERIMENTAL
76 help 76 help
77 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. 77 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
78 You have to set leds-alix2.force=1 for boards with Award BIOS.
78 79
79config LEDS_H1940 80config LEDS_H1940
80 tristate "LED Support for iPAQ H1940 device" 81 tristate "LED Support for iPAQ H1940 device"
@@ -145,15 +146,16 @@ config LEDS_GPIO_OF
145 of_platform devices. For instance, LEDs which are listed in a "dts" 146 of_platform devices. For instance, LEDs which are listed in a "dts"
146 file. 147 file.
147 148
148config LEDS_LP5521 149config LEDS_LP3944
149 tristate "LED Support for the LP5521 LEDs" 150 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
150 depends on LEDS_CLASS && I2C 151 depends on LEDS_CLASS && I2C
151 help 152 help
152 If you say 'Y' here you get support for the National Semiconductor 153 This option enables support for LEDs connected to the National
153 LP5521 LED driver used in n8x0 boards. 154 Semiconductor LP3944 Lighting Management Unit (LMU) also known as
155 Fun Light Chip.
154 156
155 This driver can be built as a module by choosing 'M'. The module 157 To compile this driver as a module, choose M here: the
156 will be called leds-lp5521. 158 module will be called leds-lp3944.
157 159
158config LEDS_CLEVO_MAIL 160config LEDS_CLEVO_MAIL
159 tristate "Mail LED on Clevo notebook" 161 tristate "Mail LED on Clevo notebook"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2d41c4dcf92f..e8cdcf77a4c3 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
20obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o 20obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
21obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o 21obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
22obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 22obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
23obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
23obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 24obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
24obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 25obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
25obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 26obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index ddbd7730dfc8..731d4eef3425 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -14,7 +14,7 @@
14 14
15static int force = 0; 15static int force = 0;
16module_param(force, bool, 0444); 16module_param(force, bool, 0444);
17MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs"); 17MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
18 18
19struct alix_led { 19struct alix_led {
20 struct led_classdev cdev; 20 struct led_classdev cdev;
@@ -155,6 +155,11 @@ static int __init alix_led_init(void)
155 goto out; 155 goto out;
156 } 156 }
157 157
158 /* enable output on GPIO for LED 1,2,3 */
159 outl(1 << 6, 0x6104);
160 outl(1 << 9, 0x6184);
161 outl(1 << 11, 0x6184);
162
158 pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); 163 pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
159 if (!IS_ERR(pdev)) { 164 if (!IS_ERR(pdev)) {
160 ret = platform_driver_probe(&alix_led_driver, alix_led_probe); 165 ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 4149ecb3a9b2..779d7f262c04 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -97,6 +97,10 @@ struct bd2802_led {
97 enum led_ids led_id; 97 enum led_ids led_id;
98 enum led_colors color; 98 enum led_colors color;
99 enum led_bits state; 99 enum led_bits state;
100
101 /* General attributes of RGB LEDs */
102 int wave_pattern;
103 int rgb_current;
100}; 104};
101 105
102 106
@@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id,
254 bd2802_reset_cancel(led); 258 bd2802_reset_cancel(led);
255 259
256 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); 260 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
257 bd2802_write_byte(led->client, reg, BD2802_CURRENT_032); 261 bd2802_write_byte(led->client, reg, led->rgb_current);
258 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); 262 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
259 bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); 263 bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
260 reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); 264 reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
@@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id,
275 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP); 279 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
276 bd2802_write_byte(led->client, reg, BD2802_CURRENT_000); 280 bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
277 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP); 281 reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
278 bd2802_write_byte(led->client, reg, BD2802_CURRENT_032); 282 bd2802_write_byte(led->client, reg, led->rgb_current);
279 reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN); 283 reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
280 bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF); 284 bd2802_write_byte(led->client, reg, led->wave_pattern);
281 285
282 bd2802_enable(led, id); 286 bd2802_enable(led, id);
283 bd2802_update_state(led, id, color, BD2802_BLINK); 287 bd2802_update_state(led, id, color, BD2802_BLINK);
@@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led)
406 ret = device_create_file(&led->client->dev, 410 ret = device_create_file(&led->client->dev,
407 bd2802_addr_attributes[i]); 411 bd2802_addr_attributes[i]);
408 if (ret) { 412 if (ret) {
409 dev_err(&led->client->dev, "failed to sysfs file %s\n", 413 dev_err(&led->client->dev, "failed: sysfs file %s\n",
410 bd2802_addr_attributes[i]->attr.name); 414 bd2802_addr_attributes[i]->attr.name);
411 goto failed_remove_files; 415 goto failed_remove_files;
412 } 416 }
@@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = {
483 .store = bd2802_store_adv_conf, 487 .store = bd2802_store_adv_conf,
484}; 488};
485 489
490#define BD2802_CONTROL_ATTR(attr_name, name_str) \
491static ssize_t bd2802_show_##attr_name(struct device *dev, \
492 struct device_attribute *attr, char *buf) \
493{ \
494 struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
495 ssize_t ret; \
496 down_read(&led->rwsem); \
497 ret = sprintf(buf, "0x%02x\n", led->attr_name); \
498 up_read(&led->rwsem); \
499 return ret; \
500} \
501static ssize_t bd2802_store_##attr_name(struct device *dev, \
502 struct device_attribute *attr, const char *buf, size_t count) \
503{ \
504 struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
505 unsigned long val; \
506 int ret; \
507 if (!count) \
508 return -EINVAL; \
509 ret = strict_strtoul(buf, 16, &val); \
510 if (ret) \
511 return ret; \
512 down_write(&led->rwsem); \
513 led->attr_name = val; \
514 up_write(&led->rwsem); \
515 return count; \
516} \
517static struct device_attribute bd2802_##attr_name##_attr = { \
518 .attr = { \
519 .name = name_str, \
520 .mode = 0644, \
521 .owner = THIS_MODULE \
522 }, \
523 .show = bd2802_show_##attr_name, \
524 .store = bd2802_store_##attr_name, \
525};
526
527BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
528BD2802_CONTROL_ATTR(rgb_current, "rgb_current");
529
530static struct device_attribute *bd2802_attributes[] = {
531 &bd2802_adv_conf_attr,
532 &bd2802_wave_pattern_attr,
533 &bd2802_rgb_current_attr,
534};
535
486static void bd2802_led_work(struct work_struct *work) 536static void bd2802_led_work(struct work_struct *work)
487{ 537{
488 struct bd2802_led *led = container_of(work, struct bd2802_led, work); 538 struct bd2802_led *led = container_of(work, struct bd2802_led, work);
@@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
538 led->cdev_led1r.brightness = LED_OFF; 588 led->cdev_led1r.brightness = LED_OFF;
539 led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness; 589 led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
540 led->cdev_led1r.blink_set = bd2802_set_led1r_blink; 590 led->cdev_led1r.blink_set = bd2802_set_led1r_blink;
541 led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME;
542 591
543 ret = led_classdev_register(&led->client->dev, &led->cdev_led1r); 592 ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
544 if (ret < 0) { 593 if (ret < 0) {
@@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
551 led->cdev_led1g.brightness = LED_OFF; 600 led->cdev_led1g.brightness = LED_OFF;
552 led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness; 601 led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
553 led->cdev_led1g.blink_set = bd2802_set_led1g_blink; 602 led->cdev_led1g.blink_set = bd2802_set_led1g_blink;
554 led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME;
555 603
556 ret = led_classdev_register(&led->client->dev, &led->cdev_led1g); 604 ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
557 if (ret < 0) { 605 if (ret < 0) {
@@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
564 led->cdev_led1b.brightness = LED_OFF; 612 led->cdev_led1b.brightness = LED_OFF;
565 led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness; 613 led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
566 led->cdev_led1b.blink_set = bd2802_set_led1b_blink; 614 led->cdev_led1b.blink_set = bd2802_set_led1b_blink;
567 led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME;
568 615
569 ret = led_classdev_register(&led->client->dev, &led->cdev_led1b); 616 ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
570 if (ret < 0) { 617 if (ret < 0) {
@@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
577 led->cdev_led2r.brightness = LED_OFF; 624 led->cdev_led2r.brightness = LED_OFF;
578 led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness; 625 led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
579 led->cdev_led2r.blink_set = bd2802_set_led2r_blink; 626 led->cdev_led2r.blink_set = bd2802_set_led2r_blink;
580 led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME;
581 627
582 ret = led_classdev_register(&led->client->dev, &led->cdev_led2r); 628 ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
583 if (ret < 0) { 629 if (ret < 0) {
@@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
590 led->cdev_led2g.brightness = LED_OFF; 636 led->cdev_led2g.brightness = LED_OFF;
591 led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness; 637 led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
592 led->cdev_led2g.blink_set = bd2802_set_led2g_blink; 638 led->cdev_led2g.blink_set = bd2802_set_led2g_blink;
593 led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME;
594 639
595 ret = led_classdev_register(&led->client->dev, &led->cdev_led2g); 640 ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
596 if (ret < 0) { 641 if (ret < 0) {
@@ -640,7 +685,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
640{ 685{
641 struct bd2802_led *led; 686 struct bd2802_led *led;
642 struct bd2802_led_platform_data *pdata; 687 struct bd2802_led_platform_data *pdata;
643 int ret; 688 int ret, i;
644 689
645 led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL); 690 led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
646 if (!led) { 691 if (!led) {
@@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client,
670 /* To save the power, reset BD2802 after detecting */ 715 /* To save the power, reset BD2802 after detecting */
671 gpio_set_value(led->pdata->reset_gpio, 0); 716 gpio_set_value(led->pdata->reset_gpio, 0);
672 717
718 /* Default attributes */
719 led->wave_pattern = BD2802_PATTERN_HALF;
720 led->rgb_current = BD2802_CURRENT_032;
721
673 init_rwsem(&led->rwsem); 722 init_rwsem(&led->rwsem);
674 723
675 ret = device_create_file(&client->dev, &bd2802_adv_conf_attr); 724 for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
676 if (ret) { 725 ret = device_create_file(&led->client->dev,
677 dev_err(&client->dev, "failed to create sysfs file %s\n", 726 bd2802_attributes[i]);
678 bd2802_adv_conf_attr.attr.name); 727 if (ret) {
679 goto failed_free; 728 dev_err(&led->client->dev, "failed: sysfs file %s\n",
729 bd2802_attributes[i]->attr.name);
730 goto failed_unregister_dev_file;
731 }
680 } 732 }
681 733
682 ret = bd2802_register_led_classdev(led); 734 ret = bd2802_register_led_classdev(led);
@@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client,
686 return 0; 738 return 0;
687 739
688failed_unregister_dev_file: 740failed_unregister_dev_file:
689 device_remove_file(&client->dev, &bd2802_adv_conf_attr); 741 for (i--; i >= 0; i--)
742 device_remove_file(&led->client->dev, bd2802_attributes[i]);
690failed_free: 743failed_free:
691 i2c_set_clientdata(client, NULL); 744 i2c_set_clientdata(client, NULL);
692 kfree(led); 745 kfree(led);
@@ -697,12 +750,14 @@ failed_free:
697static int __exit bd2802_remove(struct i2c_client *client) 750static int __exit bd2802_remove(struct i2c_client *client)
698{ 751{
699 struct bd2802_led *led = i2c_get_clientdata(client); 752 struct bd2802_led *led = i2c_get_clientdata(client);
753 int i;
700 754
701 bd2802_unregister_led_classdev(led);
702 gpio_set_value(led->pdata->reset_gpio, 0); 755 gpio_set_value(led->pdata->reset_gpio, 0);
756 bd2802_unregister_led_classdev(led);
703 if (led->adf_on) 757 if (led->adf_on)
704 bd2802_disable_adv_conf(led); 758 bd2802_disable_adv_conf(led);
705 device_remove_file(&client->dev, &bd2802_adv_conf_attr); 759 for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
760 device_remove_file(&led->client->dev, bd2802_attributes[i]);
706 i2c_set_clientdata(client, NULL); 761 i2c_set_clientdata(client, NULL);
707 kfree(led); 762 kfree(led);
708 763
@@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client)
723 struct bd2802_led *led = i2c_get_clientdata(client); 778 struct bd2802_led *led = i2c_get_clientdata(client);
724 779
725 if (!bd2802_is_all_off(led) || led->adf_on) { 780 if (!bd2802_is_all_off(led) || led->adf_on) {
726 gpio_set_value(led->pdata->reset_gpio, 1); 781 bd2802_reset_cancel(led);
727 udelay(100);
728 bd2802_restore_state(led); 782 bd2802_restore_state(led);
729 } 783 }
730 784
@@ -762,4 +816,4 @@ module_exit(bd2802_exit);
762 816
763MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>"); 817MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
764MODULE_DESCRIPTION("BD2802 LED driver"); 818MODULE_DESCRIPTION("BD2802 LED driver");
765MODULE_LICENSE("GPL"); 819MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index ff0e8c3fbf9b..5f1ce810815f 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * LEDs driver for the Cobalt Raq series. 2 * LEDs driver for the Cobalt Raq series.
3 * 3 *
4 * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d2109054de85..6b06638eb5b4 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
76 struct gpio_led_data *led_dat, struct device *parent, 76 struct gpio_led_data *led_dat, struct device *parent,
77 int (*blink_set)(unsigned, unsigned long *, unsigned long *)) 77 int (*blink_set)(unsigned, unsigned long *, unsigned long *))
78{ 78{
79 int ret; 79 int ret, state;
80 80
81 /* skip leds that aren't available */ 81 /* skip leds that aren't available */
82 if (!gpio_is_valid(template->gpio)) { 82 if (!gpio_is_valid(template->gpio)) {
@@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
99 led_dat->cdev.blink_set = gpio_blink_set; 99 led_dat->cdev.blink_set = gpio_blink_set;
100 } 100 }
101 led_dat->cdev.brightness_set = gpio_led_set; 101 led_dat->cdev.brightness_set = gpio_led_set;
102 led_dat->cdev.brightness = LED_OFF; 102 if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
103 state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
104 else
105 state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
106 led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
103 if (!template->retain_state_suspended) 107 if (!template->retain_state_suspended)
104 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 108 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
105 109
106 ret = gpio_direction_output(led_dat->gpio, led_dat->active_low); 110 ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
107 if (ret < 0) 111 if (ret < 0)
108 goto err; 112 goto err;
109 113
@@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led)
129} 133}
130 134
131#ifdef CONFIG_LEDS_GPIO_PLATFORM 135#ifdef CONFIG_LEDS_GPIO_PLATFORM
132static int gpio_led_probe(struct platform_device *pdev) 136static int __devinit gpio_led_probe(struct platform_device *pdev)
133{ 137{
134 struct gpio_led_platform_data *pdata = pdev->dev.platform_data; 138 struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
135 struct gpio_led_data *leds_data; 139 struct gpio_led_data *leds_data;
@@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
223 memset(&led, 0, sizeof(led)); 227 memset(&led, 0, sizeof(led));
224 for_each_child_of_node(np, child) { 228 for_each_child_of_node(np, child) {
225 enum of_gpio_flags flags; 229 enum of_gpio_flags flags;
230 const char *state;
226 231
227 led.gpio = of_get_gpio_flags(child, 0, &flags); 232 led.gpio = of_get_gpio_flags(child, 0, &flags);
228 led.active_low = flags & OF_GPIO_ACTIVE_LOW; 233 led.active_low = flags & OF_GPIO_ACTIVE_LOW;
229 led.name = of_get_property(child, "label", NULL) ? : child->name; 234 led.name = of_get_property(child, "label", NULL) ? : child->name;
230 led.default_trigger = 235 led.default_trigger =
231 of_get_property(child, "linux,default-trigger", NULL); 236 of_get_property(child, "linux,default-trigger", NULL);
237 state = of_get_property(child, "default-state", NULL);
238 if (state) {
239 if (!strcmp(state, "keep"))
240 led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
241 else if(!strcmp(state, "on"))
242 led.default_state = LEDS_GPIO_DEFSTATE_ON;
243 else
244 led.default_state = LEDS_GPIO_DEFSTATE_OFF;
245 }
232 246
233 ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++], 247 ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
234 &ofdev->dev, NULL); 248 &ofdev->dev, NULL);
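
The hunks above teach leds-gpio a per-LED "default-state" (off, on, or keep) and have create_gpio_led() honour it instead of always starting from LED_OFF. A hypothetical platform-data sketch follows; LED names and GPIO numbers are invented, and the struct gpio_led / gpio_led_platform_data layout is assumed to expose exactly the fields this diff reads (name, gpio, active_low, default_state).

/* Hypothetical board snippet exercising the new default_state field. */
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

static struct gpio_led demo_leds[] = {
	{
		.name		= "demo:green:status",
		.gpio		= 42,
		.default_state	= LEDS_GPIO_DEFSTATE_KEEP,	/* keep what the bootloader set */
	},
	{
		.name		= "demo:red:power",
		.gpio		= 43,
		.active_low	= 1,
		.default_state	= LEDS_GPIO_DEFSTATE_ON,	/* light up at probe time */
	},
};

static struct gpio_led_platform_data demo_led_pdata = {
	.num_leds	= ARRAY_SIZE(demo_leds),
	.leds		= demo_leds,
};

static struct platform_device demo_led_device = {
	.name			= "leds-gpio",
	.id			= -1,
	.dev.platform_data	= &demo_led_pdata,
};

With DEFSTATE_KEEP the probe path reads the current GPIO value (XORed with active_low) rather than forcing the LED off, so an indicator lit by the bootloader stays lit across the handover; the equivalent device-tree form added above is a default-state = "keep" property on the LED node.
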
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
new file mode 100644
index 000000000000..5946208ba26e
--- /dev/null
+++ b/drivers/leds/leds-lp3944.c
@@ -0,0 +1,466 @@
1/*
2 * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip
3 *
4 * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12/*
13 * I2C driver for National Semiconductor LP3944 Funlight Chip
14 * http://www.national.com/pf/LP/LP3944.html
15 *
16 * This helper chip can drive up to 8 leds, with two programmable DIM modes;
17 * it could even be used as a gpio expander but this driver assumes it is used
18 * as a led controller.
19 *
20 * The DIM modes are used to set _blink_ patterns for leds; the pattern is
21 * specified by supplying two parameters:
22 * - period: from 0s to 1.6s
23 * - duty cycle: percentage of the period the led is on, from 0 to 100
24 *
25 * LP3944 can be found on the Motorola A910 smartphone, where it drives the rgb
26 * leds, the camera flash light and the displays backlights.
27 */
28
29#include <linux/module.h>
30#include <linux/i2c.h>
31#include <linux/leds.h>
32#include <linux/mutex.h>
33#include <linux/workqueue.h>
34#include <linux/leds-lp3944.h>
35
36/* Read Only Registers */
37#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */
38#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */
39
40#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */
41#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */
42#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */
43#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */
44#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */
45#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */
46
47/* These registers are not used to control leds in LP3944, they can store
48 * arbitrary values which the chip will ignore.
49 */
50#define LP3944_REG_REGISTER8 0x08
51#define LP3944_REG_REGISTER9 0x09
52
53#define LP3944_DIM0 0
54#define LP3944_DIM1 1
55
56/* period in ms */
57#define LP3944_PERIOD_MIN 0
58#define LP3944_PERIOD_MAX 1600
59
60/* duty cycle is a percentage */
61#define LP3944_DUTY_CYCLE_MIN 0
62#define LP3944_DUTY_CYCLE_MAX 100
63
64#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)
65
66/* Saved data */
67struct lp3944_led_data {
68 u8 id;
69 enum lp3944_type type;
70 enum lp3944_status status;
71 struct led_classdev ldev;
72 struct i2c_client *client;
73 struct work_struct work;
74};
75
76struct lp3944_data {
77 struct mutex lock;
78 struct i2c_client *client;
79 struct lp3944_led_data leds[LP3944_LEDS_MAX];
80};
81
82static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
83{
84 int tmp;
85
86 tmp = i2c_smbus_read_byte_data(client, reg);
87 if (tmp < 0)
88 return -EINVAL;
89
90 *value = tmp;
91
92 return 0;
93}
94
95static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
96{
97 return i2c_smbus_write_byte_data(client, reg, value);
98}
99
100/**
101 * Set the period for DIM status
102 *
103 * @client: the i2c client
104 * @dim: either LP3944_DIM0 or LP3944_DIM1
105 * @period: period of a blink, that is an on/off cycle, expressed in ms.
106 */
107static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
108{
109 u8 psc_reg;
110 u8 psc_value;
111 int err;
112
113 if (dim == LP3944_DIM0)
114 psc_reg = LP3944_REG_PSC0;
115 else if (dim == LP3944_DIM1)
116 psc_reg = LP3944_REG_PSC1;
117 else
118 return -EINVAL;
119
120 /* Convert period to Prescaler value */
121 if (period > LP3944_PERIOD_MAX)
122 return -EINVAL;
123
124 psc_value = (period * 255) / LP3944_PERIOD_MAX;
125
126 err = lp3944_reg_write(client, psc_reg, psc_value);
127
128 return err;
129}
130
131/**
132 * Set the duty cycle for DIM status
133 *
134 * @client: the i2c client
135 * @dim: either LP3944_DIM0 or LP3944_DIM1
136 * @duty_cycle: percentage of a period during which a led is ON
137 */
138static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
139 u8 duty_cycle)
140{
141 u8 pwm_reg;
142 u8 pwm_value;
143 int err;
144
145 if (dim == LP3944_DIM0)
146 pwm_reg = LP3944_REG_PWM0;
147 else if (dim == LP3944_DIM1)
148 pwm_reg = LP3944_REG_PWM1;
149 else
150 return -EINVAL;
151
152 /* Convert duty cycle to PWM value */
153 if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
154 return -EINVAL;
155
156 pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
157
158 err = lp3944_reg_write(client, pwm_reg, pwm_value);
159
160 return err;
161}
162
163/**
164 * Set the led status
165 *
166 * @led: a lp3944_led_data structure
167 * @status: one of LP3944_LED_STATUS_OFF
168 * LP3944_LED_STATUS_ON
169 * LP3944_LED_STATUS_DIM0
170 * LP3944_LED_STATUS_DIM1
171 */
172static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
173{
174 struct lp3944_data *data = i2c_get_clientdata(led->client);
175 u8 id = led->id;
176 u8 reg;
177 u8 val = 0;
178 int err;
179
180 dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
181 __func__, led->ldev.name, status);
182
183 switch (id) {
184 case LP3944_LED0:
185 case LP3944_LED1:
186 case LP3944_LED2:
187 case LP3944_LED3:
188 reg = LP3944_REG_LS0;
189 break;
190 case LP3944_LED4:
191 case LP3944_LED5:
192 case LP3944_LED6:
193 case LP3944_LED7:
194 id -= LP3944_LED4;
195 reg = LP3944_REG_LS1;
196 break;
197 default:
198 return -EINVAL;
199 }
200
201 if (status > LP3944_LED_STATUS_DIM1)
202 return -EINVAL;
203
204 /* invert only 0 and 1, leave the other values unchanged;
205 * remember we are abusing status to set blink patterns
206 */
207 if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
208 status = 1 - status;
209
210 mutex_lock(&data->lock);
211 lp3944_reg_read(led->client, reg, &val);
212
213 val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
214 val |= (status << (id << 1));
215
216 dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
217 __func__, led->ldev.name, reg, id, status, val);
218
219 /* set led status */
220 err = lp3944_reg_write(led->client, reg, val);
221 mutex_unlock(&data->lock);
222
223 return err;
224}
225
226static int lp3944_led_set_blink(struct led_classdev *led_cdev,
227 unsigned long *delay_on,
228 unsigned long *delay_off)
229{
230 struct lp3944_led_data *led = ldev_to_led(led_cdev);
231 u16 period;
232 u8 duty_cycle;
233 int err;
234
235 /* units are in ms */
236 if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
237 return -EINVAL;
238
239 if (*delay_on == 0 && *delay_off == 0) {
240 /* Special case: the leds subsystem requires a default user
241 * friendly blink pattern for the LED. Let's blink the led
242 * slowly (1Hz).
243 */
244 *delay_on = 500;
245 *delay_off = 500;
246 }
247
248 period = (*delay_on) + (*delay_off);
249
250 /* duty_cycle is the percentage of period during which the led is ON */
251 duty_cycle = 100 * (*delay_on) / period;
252
253 /* invert duty cycle for inverted leds; this has the same effect as
254 * swapping delay_on and delay_off
255 */
256 if (led->type == LP3944_LED_TYPE_LED_INVERTED)
257 duty_cycle = 100 - duty_cycle;
258
259 /* NOTE: always using the first DIM mode; this means that all leds
260 * will have the same blinking pattern.
261 *
262 * We could find a way later to have two leds blinking in hardware
263 * with different patterns at the same time, falling back to software
264 * control for the other ones.
265 */
266 err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
267 if (err)
268 return err;
269
270 err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
271 if (err)
272 return err;
273
274 dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
275 __func__);
276
277 led->status = LP3944_LED_STATUS_DIM0;
278 schedule_work(&led->work);
279
280 return 0;
281}
282
283static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
284 enum led_brightness brightness)
285{
286 struct lp3944_led_data *led = ldev_to_led(led_cdev);
287
288 dev_dbg(&led->client->dev, "%s: %s, %d\n",
289 __func__, led_cdev->name, brightness);
290
291 led->status = brightness;
292 schedule_work(&led->work);
293}
294
295static void lp3944_led_work(struct work_struct *work)
296{
297 struct lp3944_led_data *led;
298
299 led = container_of(work, struct lp3944_led_data, work);
300 lp3944_led_set(led, led->status);
301}
302
303static int lp3944_configure(struct i2c_client *client,
304 struct lp3944_data *data,
305 struct lp3944_platform_data *pdata)
306{
307 int i, err = 0;
308
309 for (i = 0; i < pdata->leds_size; i++) {
310 struct lp3944_led *pled = &pdata->leds[i];
311 struct lp3944_led_data *led = &data->leds[i];
312 led->client = client;
313 led->id = i;
314
315 switch (pled->type) {
316
317 case LP3944_LED_TYPE_LED:
318 case LP3944_LED_TYPE_LED_INVERTED:
319 led->type = pled->type;
320 led->status = pled->status;
321 led->ldev.name = pled->name;
322 led->ldev.max_brightness = 1;
323 led->ldev.brightness_set = lp3944_led_set_brightness;
324 led->ldev.blink_set = lp3944_led_set_blink;
325 led->ldev.flags = LED_CORE_SUSPENDRESUME;
326
327 INIT_WORK(&led->work, lp3944_led_work);
328 err = led_classdev_register(&client->dev, &led->ldev);
329 if (err < 0) {
330 dev_err(&client->dev,
331 "couldn't register LED %s\n",
332 led->ldev.name);
333 goto exit;
334 }
335
336 /* to expose the default value to userspace */
337 led->ldev.brightness = led->status;
338
339 /* Set the default led status */
340 err = lp3944_led_set(led, led->status);
341 if (err < 0) {
342 dev_err(&client->dev,
343 "%s couldn't set STATUS %d\n",
344 led->ldev.name, led->status);
345 goto exit;
346 }
347 break;
348
349 case LP3944_LED_TYPE_NONE:
350 default:
351 break;
352
353 }
354 }
355 return 0;
356
357exit:
358 if (i > 0)
359 for (i = i - 1; i >= 0; i--)
360 switch (pdata->leds[i].type) {
361
362 case LP3944_LED_TYPE_LED:
363 case LP3944_LED_TYPE_LED_INVERTED:
364 led_classdev_unregister(&data->leds[i].ldev);
365 cancel_work_sync(&data->leds[i].work);
366 break;
367
368 case LP3944_LED_TYPE_NONE:
369 default:
370 break;
371 }
372
373 return err;
374}
375
376static int __devinit lp3944_probe(struct i2c_client *client,
377 const struct i2c_device_id *id)
378{
379 struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
380 struct lp3944_data *data;
381
382 if (lp3944_pdata == NULL) {
383 dev_err(&client->dev, "no platform data\n");
384 return -EINVAL;
385 }
386
387 /* Let's see whether this adapter can support what we need. */
388 if (!i2c_check_functionality(client->adapter,
389 I2C_FUNC_SMBUS_BYTE_DATA)) {
390 dev_err(&client->dev, "insufficient functionality!\n");
391 return -ENODEV;
392 }
393
394 data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
395 if (!data)
396 return -ENOMEM;
397
398 data->client = client;
399 i2c_set_clientdata(client, data);
400
401 mutex_init(&data->lock);
402
403 dev_info(&client->dev, "lp3944 enabled\n");
404
405 lp3944_configure(client, data, lp3944_pdata);
406 return 0;
407}
408
409static int __devexit lp3944_remove(struct i2c_client *client)
410{
411 struct lp3944_platform_data *pdata = client->dev.platform_data;
412 struct lp3944_data *data = i2c_get_clientdata(client);
413 int i;
414
415 for (i = 0; i < pdata->leds_size; i++)
416 switch (data->leds[i].type) {
417 case LP3944_LED_TYPE_LED:
418 case LP3944_LED_TYPE_LED_INVERTED:
419 led_classdev_unregister(&data->leds[i].ldev);
420 cancel_work_sync(&data->leds[i].work);
421 break;
422
423 case LP3944_LED_TYPE_NONE:
424 default:
425 break;
426 }
427
428 kfree(data);
429 i2c_set_clientdata(client, NULL);
430
431 return 0;
432}
433
434/* lp3944 i2c driver struct */
435static const struct i2c_device_id lp3944_id[] = {
436 {"lp3944", 0},
437 {}
438};
439
440MODULE_DEVICE_TABLE(i2c, lp3944_id);
441
442static struct i2c_driver lp3944_driver = {
443 .driver = {
444 .name = "lp3944",
445 },
446 .probe = lp3944_probe,
447 .remove = __devexit_p(lp3944_remove),
448 .id_table = lp3944_id,
449};
450
451static int __init lp3944_module_init(void)
452{
453 return i2c_add_driver(&lp3944_driver);
454}
455
456static void __exit lp3944_module_exit(void)
457{
458 i2c_del_driver(&lp3944_driver);
459}
460
461module_init(lp3944_module_init);
462module_exit(lp3944_module_exit);
463
464MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
465MODULE_DESCRIPTION("LP3944 Fun Light Chip");
466MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 3937244fdcab..dba8921240f2 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -35,7 +35,7 @@ struct pca9532_data {
35 struct pca9532_led leds[16]; 35 struct pca9532_led leds[16];
36 struct mutex update_lock; 36 struct mutex update_lock;
37 struct input_dev *idev; 37 struct input_dev *idev;
38 struct work_struct work; 38 struct work_struct work;
39 u8 pwm[2]; 39 u8 pwm[2];
40 u8 psc[2]; 40 u8 psc[2];
41}; 41};
@@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
87 if (b > 0xFF) 87 if (b > 0xFF)
88 return -EINVAL; 88 return -EINVAL;
89 data->pwm[pwm] = b; 89 data->pwm[pwm] = b;
90 data->psc[pwm] = blink; 90 data->psc[pwm] = blink;
91 return 0; 91 return 0;
92} 92}
93 93
94static int pca9532_setpwm(struct i2c_client *client, int pwm) 94static int pca9532_setpwm(struct i2c_client *client, int pwm)
95{ 95{
96 struct pca9532_data *data = i2c_get_clientdata(client); 96 struct pca9532_data *data = i2c_get_clientdata(client);
97 mutex_lock(&data->update_lock); 97 mutex_lock(&data->update_lock);
98 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), 98 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
99 data->pwm[pwm]); 99 data->pwm[pwm]);
100 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), 100 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
@@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
132 led->state = PCA9532_ON; 132 led->state = PCA9532_ON;
133 else { 133 else {
134 led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ 134 led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
135 err = pca9532_calcpwm(led->client, 0, 0, value); 135 err = pca9532_calcpwm(led->client, 0, 0, value);
136 if (err) 136 if (err)
137 return; /* XXX: led api doesn't allow error code? */ 137 return; /* XXX: led api doesn't allow error code? */
138 } 138 }
139 schedule_work(&led->work); 139 schedule_work(&led->work);
140} 140}
141 141
142static int pca9532_set_blink(struct led_classdev *led_cdev, 142static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
145 struct pca9532_led *led = ldev_to_led(led_cdev); 145 struct pca9532_led *led = ldev_to_led(led_cdev);
146 struct i2c_client *client = led->client; 146 struct i2c_client *client = led->client;
147 int psc; 147 int psc;
148 int err = 0; 148 int err = 0;
149 149
150 if (*delay_on == 0 && *delay_off == 0) { 150 if (*delay_on == 0 && *delay_off == 0) {
151 /* led subsystem asks us for a blink rate */ 151 /* led subsystem asks us for a blink rate */
@@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
157 157
158 /* Thecus specific: only use PSC/PWM 0 */ 158 /* Thecus specific: only use PSC/PWM 0 */
159 psc = (*delay_on * 152-1)/1000; 159 psc = (*delay_on * 152-1)/1000;
160 err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); 160 err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
161 if (err) 161 if (err)
162 return err; 162 return err;
163 schedule_work(&led->work); 163 schedule_work(&led->work);
164 return 0; 164 return 0;
165} 165}
166 166
167static int pca9532_event(struct input_dev *dev, unsigned int type, 167static int pca9532_event(struct input_dev *dev, unsigned int type,
@@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
178 else 178 else
179 data->pwm[1] = 0; 179 data->pwm[1] = 0;
180 180
181 schedule_work(&data->work); 181 schedule_work(&data->work);
182 182
183 return 0; 183 return 0;
184} 184}
185 185
186static void pca9532_input_work(struct work_struct *work) 186static void pca9532_input_work(struct work_struct *work)
187{ 187{
188 struct pca9532_data *data; 188 struct pca9532_data *data;
189 data = container_of(work, struct pca9532_data, work); 189 data = container_of(work, struct pca9532_data, work);
190 mutex_lock(&data->update_lock); 190 mutex_lock(&data->update_lock);
191 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), 191 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
192 data->pwm[1]); 192 data->pwm[1]);
@@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work)
195 195
196static void pca9532_led_work(struct work_struct *work) 196static void pca9532_led_work(struct work_struct *work)
197{ 197{
198 struct pca9532_led *led; 198 struct pca9532_led *led;
199 led = container_of(work, struct pca9532_led, work); 199 led = container_of(work, struct pca9532_led, work);
200 if (led->state == PCA9532_PWM0) 200 if (led->state == PCA9532_PWM0)
201 pca9532_setpwm(led->client, 0); 201 pca9532_setpwm(led->client, 0);
202 pca9532_setled(led); 202 pca9532_setled(led);
203} 203}
204 204
205static int pca9532_configure(struct i2c_client *client, 205static int pca9532_configure(struct i2c_client *client,
@@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client,
232 led->ldev.brightness = LED_OFF; 232 led->ldev.brightness = LED_OFF;
233 led->ldev.brightness_set = pca9532_set_brightness; 233 led->ldev.brightness_set = pca9532_set_brightness;
234 led->ldev.blink_set = pca9532_set_blink; 234 led->ldev.blink_set = pca9532_set_blink;
235 INIT_WORK(&led->work, pca9532_led_work); 235 INIT_WORK(&led->work, pca9532_led_work);
236 err = led_classdev_register(&client->dev, &led->ldev); 236 err = led_classdev_register(&client->dev, &led->ldev);
237 if (err < 0) { 237 if (err < 0) {
238 dev_err(&client->dev, 238 dev_err(&client->dev,
@@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
262 BIT_MASK(SND_TONE); 262 BIT_MASK(SND_TONE);
263 data->idev->event = pca9532_event; 263 data->idev->event = pca9532_event;
264 input_set_drvdata(data->idev, data); 264 input_set_drvdata(data->idev, data);
265 INIT_WORK(&data->work, pca9532_input_work); 265 INIT_WORK(&data->work, pca9532_input_work);
266 err = input_register_device(data->idev); 266 err = input_register_device(data->idev);
267 if (err) { 267 if (err) {
268 input_free_device(data->idev); 268 input_free_device(data->idev);
269 cancel_work_sync(&data->work); 269 cancel_work_sync(&data->work);
270 data->idev = NULL; 270 data->idev = NULL;
271 goto exit; 271 goto exit;
272 } 272 }
@@ -283,13 +283,13 @@ exit:
283 break; 283 break;
284 case PCA9532_TYPE_LED: 284 case PCA9532_TYPE_LED:
285 led_classdev_unregister(&data->leds[i].ldev); 285 led_classdev_unregister(&data->leds[i].ldev);
286 cancel_work_sync(&data->leds[i].work); 286 cancel_work_sync(&data->leds[i].work);
287 break; 287 break;
288 case PCA9532_TYPE_N2100_BEEP: 288 case PCA9532_TYPE_N2100_BEEP:
289 if (data->idev != NULL) { 289 if (data->idev != NULL) {
290 input_unregister_device(data->idev); 290 input_unregister_device(data->idev);
291 input_free_device(data->idev); 291 input_free_device(data->idev);
292 cancel_work_sync(&data->work); 292 cancel_work_sync(&data->work);
293 data->idev = NULL; 293 data->idev = NULL;
294 } 294 }
295 break; 295 break;
@@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
340 break; 340 break;
341 case PCA9532_TYPE_LED: 341 case PCA9532_TYPE_LED:
342 led_classdev_unregister(&data->leds[i].ldev); 342 led_classdev_unregister(&data->leds[i].ldev);
343 cancel_work_sync(&data->leds[i].work); 343 cancel_work_sync(&data->leds[i].work);
344 break; 344 break;
345 case PCA9532_TYPE_N2100_BEEP: 345 case PCA9532_TYPE_N2100_BEEP:
346 if (data->idev != NULL) { 346 if (data->idev != NULL) {
347 input_unregister_device(data->idev); 347 input_unregister_device(data->idev);
348 input_free_device(data->idev); 348 input_free_device(data->idev);
349 cancel_work_sync(&data->work); 349 cancel_work_sync(&data->work);
350 data->idev = NULL; 350 data->idev = NULL;
351 } 351 }
352 break; 352 break;
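A note on the blink math in pca9532_set_blink() above: the expression psc = (*delay_on * 152 - 1) / 1000 converts a delay given in milliseconds into the chip's prescaler, which implies a blink period of roughly (PSC + 1) / 152 seconds. The standalone sketch below (plain userspace C, names made up) simply exercises that conversion to show the resulting values.

/* Toy check of the PSC conversion used in pca9532_set_blink() above. */
#include <stdio.h>

static int pca9532_ms_to_psc(unsigned int delay_ms)
{
	/* Same integer formula as the driver: psc = delay_ms * 152 / 1000 - 1 */
	return (int)((delay_ms * 152 - 1) / 1000);
}

int main(void)
{
	/* 1000 ms -> PSC 151, i.e. a period of (151 + 1) / 152 = 1 s */
	printf("PSC for 1000 ms: %d\n", pca9532_ms_to_psc(1000));
	/* 500 ms -> PSC 75, i.e. a period of (75 + 1) / 152 = 0.5 s */
	printf("PSC for  500 ms: %d\n", pca9532_ms_to_psc(500));
	return 0;
}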
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index a6974e9b8ebf..1e2cb846b3c9 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -1,6 +1,8 @@
1/*P:400 This contains run_guest() which actually calls into the Host<->Guest 1/*P:400
2 * This contains run_guest() which actually calls into the Host<->Guest
2 * Switcher and analyzes the return, such as determining if the Guest wants the 3 * Switcher and analyzes the return, such as determining if the Guest wants the
3 * Host to do something. This file also contains useful helper routines. :*/ 4 * Host to do something. This file also contains useful helper routines.
5:*/
4#include <linux/module.h> 6#include <linux/module.h>
5#include <linux/stringify.h> 7#include <linux/stringify.h>
6#include <linux/stddef.h> 8#include <linux/stddef.h>
@@ -24,7 +26,8 @@ static struct page **switcher_page;
24/* This One Big lock protects all inter-guest data structures. */ 26/* This One Big lock protects all inter-guest data structures. */
25DEFINE_MUTEX(lguest_lock); 27DEFINE_MUTEX(lguest_lock);
26 28
27/*H:010 We need to set up the Switcher at a high virtual address. Remember the 29/*H:010
30 * We need to set up the Switcher at a high virtual address. Remember the
28 * Switcher is a few hundred bytes of assembler code which actually changes the 31 * Switcher is a few hundred bytes of assembler code which actually changes the
29 * CPU to run the Guest, and then changes back to the Host when a trap or 32 * CPU to run the Guest, and then changes back to the Host when a trap or
30 * interrupt happens. 33 * interrupt happens.
@@ -33,7 +36,8 @@ DEFINE_MUTEX(lguest_lock);
33 * Host since it will be running as the switchover occurs. 36 * Host since it will be running as the switchover occurs.
34 * 37 *
35 * Trying to map memory at a particular address is an unusual thing to do, so 38 * Trying to map memory at a particular address is an unusual thing to do, so
36 * it's not a simple one-liner. */ 39 * it's not a simple one-liner.
40 */
37static __init int map_switcher(void) 41static __init int map_switcher(void)
38{ 42{
39 int i, err; 43 int i, err;
@@ -47,8 +51,10 @@ static __init int map_switcher(void)
47 * easy. 51 * easy.
48 */ 52 */
49 53
50 /* We allocate an array of struct page pointers. map_vm_area() wants 54 /*
51 * this, rather than just an array of pages. */ 55 * We allocate an array of struct page pointers. map_vm_area() wants
56 * this, rather than just an array of pages.
57 */
52 switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, 58 switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
53 GFP_KERNEL); 59 GFP_KERNEL);
54 if (!switcher_page) { 60 if (!switcher_page) {
@@ -56,8 +62,10 @@ static __init int map_switcher(void)
56 goto out; 62 goto out;
57 } 63 }
58 64
59 /* Now we actually allocate the pages. The Guest will see these pages, 65 /*
60 * so we make sure they're zeroed. */ 66 * Now we actually allocate the pages. The Guest will see these pages,
67 * so we make sure they're zeroed.
68 */
61 for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { 69 for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
62 unsigned long addr = get_zeroed_page(GFP_KERNEL); 70 unsigned long addr = get_zeroed_page(GFP_KERNEL);
63 if (!addr) { 71 if (!addr) {
@@ -67,19 +75,23 @@ static __init int map_switcher(void)
67 switcher_page[i] = virt_to_page(addr); 75 switcher_page[i] = virt_to_page(addr);
68 } 76 }
69 77
70 /* First we check that the Switcher won't overlap the fixmap area at 78 /*
79 * First we check that the Switcher won't overlap the fixmap area at
71 * the top of memory. It's currently nowhere near, but it could have 80 * the top of memory. It's currently nowhere near, but it could have
72 * very strange effects if it ever happened. */ 81 * very strange effects if it ever happened.
82 */
73 if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ 83 if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){
74 err = -ENOMEM; 84 err = -ENOMEM;
75 printk("lguest: mapping switcher would thwack fixmap\n"); 85 printk("lguest: mapping switcher would thwack fixmap\n");
76 goto free_pages; 86 goto free_pages;
77 } 87 }
78 88
79 /* Now we reserve the "virtual memory area" we want: 0xFFC00000 89 /*
90 * Now we reserve the "virtual memory area" we want: 0xFFC00000
80 * (SWITCHER_ADDR). We might not get it in theory, but in practice 91 * (SWITCHER_ADDR). We might not get it in theory, but in practice
81 * it's worked so far. The end address needs +1 because __get_vm_area 92 * it's worked so far. The end address needs +1 because __get_vm_area
82 * allocates an extra guard page, so we need space for that. */ 93 * allocates an extra guard page, so we need space for that.
94 */
83 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, 95 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
84 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR 96 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
85 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); 97 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
@@ -89,11 +101,13 @@ static __init int map_switcher(void)
89 goto free_pages; 101 goto free_pages;
90 } 102 }
91 103
92 /* This code actually sets up the pages we've allocated to appear at 104 /*
105 * This code actually sets up the pages we've allocated to appear at
93 * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the 106 * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the
94 * kind of pages we're mapping (kernel pages), and a pointer to our 107 * kind of pages we're mapping (kernel pages), and a pointer to our
95 * array of struct pages. It increments that pointer, but we don't 108 * array of struct pages. It increments that pointer, but we don't
96 * care. */ 109 * care.
110 */
97 pagep = switcher_page; 111 pagep = switcher_page;
98 err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); 112 err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
99 if (err) { 113 if (err) {
@@ -101,8 +115,10 @@ static __init int map_switcher(void)
101 goto free_vma; 115 goto free_vma;
102 } 116 }
103 117
104 /* Now the Switcher is mapped at the right address, we can't fail! 118 /*
105 * Copy in the compiled-in Switcher code (from <arch>_switcher.S). */ 119 * Now the Switcher is mapped at the right address, we can't fail!
120 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
121 */
106 memcpy(switcher_vma->addr, start_switcher_text, 122 memcpy(switcher_vma->addr, start_switcher_text,
107 end_switcher_text - start_switcher_text); 123 end_switcher_text - start_switcher_text);
108 124
@@ -124,8 +140,7 @@ out:
124} 140}
125/*:*/ 141/*:*/
126 142
127/* Cleaning up the mapping when the module is unloaded is almost... 143/* Cleaning up the mapping when the module is unloaded is almost... too easy. */
128 * too easy. */
129static void unmap_switcher(void) 144static void unmap_switcher(void)
130{ 145{
131 unsigned int i; 146 unsigned int i;
@@ -151,16 +166,19 @@ static void unmap_switcher(void)
151 * But we can't trust the Guest: it might be trying to access the Launcher 166 * But we can't trust the Guest: it might be trying to access the Launcher
152 * code. We have to check that the range is below the pfn_limit the Launcher 167 * code. We have to check that the range is below the pfn_limit the Launcher
153 * gave us. We have to make sure that addr + len doesn't give us a false 168 * gave us. We have to make sure that addr + len doesn't give us a false
154 * positive by overflowing, too. */ 169 * positive by overflowing, too.
170 */
155bool lguest_address_ok(const struct lguest *lg, 171bool lguest_address_ok(const struct lguest *lg,
156 unsigned long addr, unsigned long len) 172 unsigned long addr, unsigned long len)
157{ 173{
158 return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); 174 return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
159} 175}
160 176
161/* This routine copies memory from the Guest. Here we can see how useful the 177/*
178 * This routine copies memory from the Guest. Here we can see how useful the
162 * kill_lguest() routine we met in the Launcher can be: we return a random 179 * kill_lguest() routine we met in the Launcher can be: we return a random
163 * value (all zeroes) instead of needing to return an error. */ 180 * value (all zeroes) instead of needing to return an error.
181 */
164void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) 182void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)
165{ 183{
166 if (!lguest_address_ok(cpu->lg, addr, bytes) 184 if (!lguest_address_ok(cpu->lg, addr, bytes)
@@ -181,9 +199,11 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
181} 199}
182/*:*/ 200/*:*/
183 201
184/*H:030 Let's jump straight to the main loop which runs the Guest. 202/*H:030
203 * Let's jump straight to the main loop which runs the Guest.
185 * Remember, this is called by the Launcher reading /dev/lguest, and we keep 204 * Remember, this is called by the Launcher reading /dev/lguest, and we keep
186 * going around and around until something interesting happens. */ 205 * going around and around until something interesting happens.
206 */
187int run_guest(struct lg_cpu *cpu, unsigned long __user *user) 207int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
188{ 208{
189 /* We stop running once the Guest is dead. */ 209 /* We stop running once the Guest is dead. */
@@ -195,10 +215,17 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
195 if (cpu->hcall) 215 if (cpu->hcall)
196 do_hypercalls(cpu); 216 do_hypercalls(cpu);
197 217
198 /* It's possible the Guest did a NOTIFY hypercall to the 218 /*
199 * Launcher, in which case we return from the read() now. */ 219 * It's possible the Guest did a NOTIFY hypercall to the
220 * Launcher.
221 */
200 if (cpu->pending_notify) { 222 if (cpu->pending_notify) {
223 /*
224 * Does it just need to write to a registered
225 * eventfd (ie. the appropriate virtqueue thread)?
226 */
201 if (!send_notify_to_eventfd(cpu)) { 227 if (!send_notify_to_eventfd(cpu)) {
228 /* OK, we tell the main Launcher. */
202 if (put_user(cpu->pending_notify, user)) 229 if (put_user(cpu->pending_notify, user))
203 return -EFAULT; 230 return -EFAULT;
204 return sizeof(cpu->pending_notify); 231 return sizeof(cpu->pending_notify);
@@ -209,29 +236,39 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
209 if (signal_pending(current)) 236 if (signal_pending(current))
210 return -ERESTARTSYS; 237 return -ERESTARTSYS;
211 238
212 /* Check if there are any interrupts which can be delivered now: 239 /*
240 * Check if there are any interrupts which can be delivered now:
213 * if so, this sets up the handler to be executed when we next 241 * if so, this sets up the handler to be executed when we next
214 * run the Guest. */ 242 * run the Guest.
214 * run the Guest. */ 242 * run the Guest.
243 */
215 irq = interrupt_pending(cpu, &more); 244 irq = interrupt_pending(cpu, &more);
216 if (irq < LGUEST_IRQS) 245 if (irq < LGUEST_IRQS)
217 try_deliver_interrupt(cpu, irq, more); 246 try_deliver_interrupt(cpu, irq, more);
218 247
219 /* All long-lived kernel loops need to check with this horrible 248 /*
249 * All long-lived kernel loops need to check with this horrible
220 * thing called the freezer. If the Host is trying to suspend, 250 * thing called the freezer. If the Host is trying to suspend,
221 * it stops us. */ 251 * it stops us.
252 */
222 try_to_freeze(); 253 try_to_freeze();
223 254
224 /* Just make absolutely sure the Guest is still alive. One of 255 /*
225 * those hypercalls could have been fatal, for example. */ 256 * Just make absolutely sure the Guest is still alive. One of
257 * those hypercalls could have been fatal, for example.
258 */
226 if (cpu->lg->dead) 259 if (cpu->lg->dead)
227 break; 260 break;
228 261
229 /* If the Guest asked to be stopped, we sleep. The Guest's 262 /*
230 * clock timer will wake us. */ 263 * If the Guest asked to be stopped, we sleep. The Guest's
264 * clock timer will wake us.
265 */
231 if (cpu->halted) { 266 if (cpu->halted) {
232 set_current_state(TASK_INTERRUPTIBLE); 267 set_current_state(TASK_INTERRUPTIBLE);
233 /* Just before we sleep, make sure no interrupt snuck in 268 /*
234 * which we should be doing. */ 269 * Just before we sleep, make sure no interrupt snuck in
270 * which we should be doing.
271 */
235 if (interrupt_pending(cpu, &more) < LGUEST_IRQS) 272 if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
236 set_current_state(TASK_RUNNING); 273 set_current_state(TASK_RUNNING);
237 else 274 else
@@ -239,8 +276,10 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
239 continue; 276 continue;
240 } 277 }
241 278
242 /* OK, now we're ready to jump into the Guest. First we put up 279 /*
243 * the "Do Not Disturb" sign: */ 280 * OK, now we're ready to jump into the Guest. First we put up
281 * the "Do Not Disturb" sign:
282 */
244 local_irq_disable(); 283 local_irq_disable();
245 284
246 /* Actually run the Guest until something happens. */ 285 /* Actually run the Guest until something happens. */
@@ -327,8 +366,10 @@ static void __exit fini(void)
327} 366}
328/*:*/ 367/*:*/
329 368
330/* The Host side of lguest can be a module. This is a nice way for people to 369/*
331 * play with it. */ 370 * The Host side of lguest can be a module. This is a nice way for people to
371 * play with it.
372 */
332module_init(init); 373module_init(init);
333module_exit(fini); 374module_exit(fini);
334MODULE_LICENSE("GPL"); 375MODULE_LICENSE("GPL");
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index c29ffa19cb74..83511eb0923d 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -1,8 +1,10 @@
1/*P:500 Just as userspace programs request kernel operations through a system 1/*P:500
2 * Just as userspace programs request kernel operations through a system
2 * call, the Guest requests Host operations through a "hypercall". You might 3 * call, the Guest requests Host operations through a "hypercall". You might
3 * notice this nomenclature doesn't really follow any logic, but the name has 4 * notice this nomenclature doesn't really follow any logic, but the name has
4 * been around for long enough that we're stuck with it. As you'd expect, this 5 * been around for long enough that we're stuck with it. As you'd expect, this
5 * code is basically one big switch statement. :*/ 6 * code is basically one big switch statement.
7:*/
6 8
7/* Copyright (C) 2006 Rusty Russell IBM Corporation 9/* Copyright (C) 2006 Rusty Russell IBM Corporation
8 10
@@ -28,30 +30,41 @@
28#include <asm/pgtable.h> 30#include <asm/pgtable.h>
29#include "lg.h" 31#include "lg.h"
30 32
31/*H:120 This is the core hypercall routine: where the Guest gets what it wants. 33/*H:120
32 * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. */ 34 * This is the core hypercall routine: where the Guest gets what it wants.
35 * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both.
36 */
33static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) 37static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
34{ 38{
35 switch (args->arg0) { 39 switch (args->arg0) {
36 case LHCALL_FLUSH_ASYNC: 40 case LHCALL_FLUSH_ASYNC:
37 /* This call does nothing, except by breaking out of the Guest 41 /*
38 * it makes us process all the asynchronous hypercalls. */ 42 * This call does nothing, except by breaking out of the Guest
43 * it makes us process all the asynchronous hypercalls.
44 */
39 break; 45 break;
40 case LHCALL_SEND_INTERRUPTS: 46 case LHCALL_SEND_INTERRUPTS:
41 /* This call does nothing too, but by breaking out of the Guest 47 /*
42 * it makes us process any pending interrupts. */ 48 * This call does nothing too, but by breaking out of the Guest
49 * it makes us process any pending interrupts.
50 */
43 break; 51 break;
44 case LHCALL_LGUEST_INIT: 52 case LHCALL_LGUEST_INIT:
45 /* You can't get here unless you're already initialized. Don't 53 /*
46 * do that. */ 54 * You can't get here unless you're already initialized. Don't
55 * do that.
56 */
47 kill_guest(cpu, "already have lguest_data"); 57 kill_guest(cpu, "already have lguest_data");
48 break; 58 break;
49 case LHCALL_SHUTDOWN: { 59 case LHCALL_SHUTDOWN: {
50 /* Shutdown is such a trivial hypercall that we do it in four
51 * lines right here. */
52 char msg[128]; 60 char msg[128];
53 /* If the lgread fails, it will call kill_guest() itself; the 61 /*
54 * kill_guest() with the message will be ignored. */ 62 * Shutdown is such a trivial hypercall that we do it in five
63 * lines right here.
64 *
65 * If the lgread fails, it will call kill_guest() itself; the
66 * kill_guest() with the message will be ignored.
67 */
55 __lgread(cpu, msg, args->arg1, sizeof(msg)); 68 __lgread(cpu, msg, args->arg1, sizeof(msg));
56 msg[sizeof(msg)-1] = '\0'; 69 msg[sizeof(msg)-1] = '\0';
57 kill_guest(cpu, "CRASH: %s", msg); 70 kill_guest(cpu, "CRASH: %s", msg);
@@ -60,16 +73,17 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
60 break; 73 break;
61 } 74 }
62 case LHCALL_FLUSH_TLB: 75 case LHCALL_FLUSH_TLB:
63 /* FLUSH_TLB comes in two flavors, depending on the 76 /* FLUSH_TLB comes in two flavors, depending on the argument: */
64 * argument: */
65 if (args->arg1) 77 if (args->arg1)
66 guest_pagetable_clear_all(cpu); 78 guest_pagetable_clear_all(cpu);
67 else 79 else
68 guest_pagetable_flush_user(cpu); 80 guest_pagetable_flush_user(cpu);
69 break; 81 break;
70 82
71 /* All these calls simply pass the arguments through to the right 83 /*
72 * routines. */ 84 * All these calls simply pass the arguments through to the right
85 * routines.
86 */
73 case LHCALL_NEW_PGTABLE: 87 case LHCALL_NEW_PGTABLE:
74 guest_new_pagetable(cpu, args->arg1); 88 guest_new_pagetable(cpu, args->arg1);
75 break; 89 break;
@@ -112,15 +126,16 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
112 kill_guest(cpu, "Bad hypercall %li\n", args->arg0); 126 kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
113 } 127 }
114} 128}
115/*:*/
116 129
117/*H:124 Asynchronous hypercalls are easy: we just look in the array in the 130/*H:124
131 * Asynchronous hypercalls are easy: we just look in the array in the
118 * Guest's "struct lguest_data" to see if any new ones are marked "ready". 132 * Guest's "struct lguest_data" to see if any new ones are marked "ready".
119 * 133 *
120 * We are careful to do these in order: obviously we respect the order the 134 * We are careful to do these in order: obviously we respect the order the
121 * Guest put them in the ring, but we also promise the Guest that they will 135 * Guest put them in the ring, but we also promise the Guest that they will
122 * happen before any normal hypercall (which is why we check this before 136 * happen before any normal hypercall (which is why we check this before
123 * checking for a normal hcall). */ 137 * checking for a normal hcall).
138 */
124static void do_async_hcalls(struct lg_cpu *cpu) 139static void do_async_hcalls(struct lg_cpu *cpu)
125{ 140{
126 unsigned int i; 141 unsigned int i;
@@ -133,22 +148,28 @@ static void do_async_hcalls(struct lg_cpu *cpu)
133 /* We process "struct lguest_data"s hcalls[] ring once. */ 148 /* We process "struct lguest_data"s hcalls[] ring once. */
134 for (i = 0; i < ARRAY_SIZE(st); i++) { 149 for (i = 0; i < ARRAY_SIZE(st); i++) {
135 struct hcall_args args; 150 struct hcall_args args;
136 /* We remember where we were up to from last time. This makes 151 /*
152 * We remember where we were up to from last time. This makes
137 * sure that the hypercalls are done in the order the Guest 153 * sure that the hypercalls are done in the order the Guest
138 * places them in the ring. */ 154 * places them in the ring.
155 */
139 unsigned int n = cpu->next_hcall; 156 unsigned int n = cpu->next_hcall;
140 157
141 /* 0xFF means there's no call here (yet). */ 158 /* 0xFF means there's no call here (yet). */
142 if (st[n] == 0xFF) 159 if (st[n] == 0xFF)
143 break; 160 break;
144 161
145 /* OK, we have a hypercall. Increment the "next_hcall" cursor, 162 /*
146 * and wrap back to 0 if we reach the end. */ 163 * OK, we have a hypercall. Increment the "next_hcall" cursor,
164 * and wrap back to 0 if we reach the end.
165 */
147 if (++cpu->next_hcall == LHCALL_RING_SIZE) 166 if (++cpu->next_hcall == LHCALL_RING_SIZE)
148 cpu->next_hcall = 0; 167 cpu->next_hcall = 0;
149 168
150 /* Copy the hypercall arguments into a local copy of 169 /*
151 * the hcall_args struct. */ 170 * Copy the hypercall arguments into a local copy of the
171 * hcall_args struct.
172 */
152 if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], 173 if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
153 sizeof(struct hcall_args))) { 174 sizeof(struct hcall_args))) {
154 kill_guest(cpu, "Fetching async hypercalls"); 175 kill_guest(cpu, "Fetching async hypercalls");
@@ -164,19 +185,25 @@ static void do_async_hcalls(struct lg_cpu *cpu)
164 break; 185 break;
165 } 186 }
166 187
167 /* Stop doing hypercalls if they want to notify the Launcher: 188 /*
168 * it needs to service this first. */ 189 * Stop doing hypercalls if they want to notify the Launcher:
190 * it needs to service this first.
191 */
169 if (cpu->pending_notify) 192 if (cpu->pending_notify)
170 break; 193 break;
171 } 194 }
172} 195}
173 196
174/* Last of all, we look at what happens first of all. The very first time the 197/*
175 * Guest makes a hypercall, we end up here to set things up: */ 198 * Last of all, we look at what happens first of all. The very first time the
199 * Guest makes a hypercall, we end up here to set things up:
200 */
176static void initialize(struct lg_cpu *cpu) 201static void initialize(struct lg_cpu *cpu)
177{ 202{
178 /* You can't do anything until you're initialized. The Guest knows the 203 /*
179 * rules, so we're unforgiving here. */ 204 * You can't do anything until you're initialized. The Guest knows the
205 * rules, so we're unforgiving here.
206 */
180 if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { 207 if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
181 kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); 208 kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
182 return; 209 return;
@@ -185,32 +212,44 @@ static void initialize(struct lg_cpu *cpu)
185 if (lguest_arch_init_hypercalls(cpu)) 212 if (lguest_arch_init_hypercalls(cpu))
186 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); 213 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
187 214
188 /* The Guest tells us where we're not to deliver interrupts by putting 215 /*
189 * the range of addresses into "struct lguest_data". */ 216 * The Guest tells us where we're not to deliver interrupts by putting
217 * the range of addresses into "struct lguest_data".
218 */
190 if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start) 219 if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
191 || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end)) 220 || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
192 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); 221 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
193 222
194 /* We write the current time into the Guest's data page once so it can 223 /*
195 * set its clock. */ 224 * We write the current time into the Guest's data page once so it can
225 * set its clock.
226 */
196 write_timestamp(cpu); 227 write_timestamp(cpu);
197 228
198 /* page_tables.c will also do some setup. */ 229 /* page_tables.c will also do some setup. */
199 page_table_guest_data_init(cpu); 230 page_table_guest_data_init(cpu);
200 231
201 /* This is the one case where the above accesses might have been the 232 /*
233 * This is the one case where the above accesses might have been the
202 * first write to a Guest page. This may have caused a copy-on-write 234 * first write to a Guest page. This may have caused a copy-on-write
203 * fault, but the old page might be (read-only) in the Guest 235 * fault, but the old page might be (read-only) in the Guest
204 * pagetable. */ 236 * pagetable.
237 */
205 guest_pagetable_clear_all(cpu); 238 guest_pagetable_clear_all(cpu);
206} 239}
207/*:*/ 240/*:*/
208 241
209/*M:013 If a Guest reads from a page (so creates a mapping) that it has never 242/*M:013
243 * If a Guest reads from a page (so creates a mapping) that it has never
210 * written to, and then the Launcher writes to it (ie. the output of a virtual 244 * written to, and then the Launcher writes to it (ie. the output of a virtual
211 * device), the Guest will still see the old page. In practice, this never 245 * device), the Guest will still see the old page. In practice, this never
212 * happens: why would the Guest read a page which it has never written to? But 246 * happens: why would the Guest read a page which it has never written to? But
213 * a similar scenario might one day bite us, so it's worth mentioning. :*/ 247 * a similar scenario might one day bite us, so it's worth mentioning.
248 *
249 * Note that if we used a shared anonymous mapping in the Launcher instead of
250 * mapping /dev/zero private, we wouldn't worry about copy-on-write. And we
251 * need that to switch the Launcher to processes (away from threads) anyway.
252:*/
214 253
215/*H:100 254/*H:100
216 * Hypercalls 255 * Hypercalls
@@ -229,17 +268,22 @@ void do_hypercalls(struct lg_cpu *cpu)
229 return; 268 return;
230 } 269 }
231 270
232 /* The Guest has initialized. 271 /*
272 * The Guest has initialized.
233 * 273 *
234 * Look in the hypercall ring for the async hypercalls: */ 274 * Look in the hypercall ring for the async hypercalls:
275 */
235 do_async_hcalls(cpu); 276 do_async_hcalls(cpu);
236 277
237 /* If we stopped reading the hypercall ring because the Guest did a 278 /*
279 * If we stopped reading the hypercall ring because the Guest did a
238 * NOTIFY to the Launcher, we want to return now. Otherwise we do 280 * NOTIFY to the Launcher, we want to return now. Otherwise we do
239 * the hypercall. */ 281 * the hypercall.
282 */
240 if (!cpu->pending_notify) { 283 if (!cpu->pending_notify) {
241 do_hcall(cpu, cpu->hcall); 284 do_hcall(cpu, cpu->hcall);
242 /* Tricky point: we reset the hcall pointer to mark the 285 /*
286 * Tricky point: we reset the hcall pointer to mark the
243 * hypercall as "done". We use the hcall pointer rather than 287 * hypercall as "done". We use the hcall pointer rather than
244 * the trap number to indicate a hypercall is pending. 288 * the trap number to indicate a hypercall is pending.
245 * Normally it doesn't matter: the Guest will run again and 289 * Normally it doesn't matter: the Guest will run again and
@@ -248,13 +292,16 @@ void do_hypercalls(struct lg_cpu *cpu)
248 * However, if we are signalled or the Guest sends I/O to the 292 * However, if we are signalled or the Guest sends I/O to the
249 * Launcher, the run_guest() loop will exit without running the 293 * Launcher, the run_guest() loop will exit without running the
250 * Guest. When it comes back it would try to re-run the 294 * Guest. When it comes back it would try to re-run the
251 * hypercall. Finding that bug sucked. */ 295 * hypercall. Finding that bug sucked.
296 */
252 cpu->hcall = NULL; 297 cpu->hcall = NULL;
253 } 298 }
254} 299}
255 300
256/* This routine supplies the Guest with time: it's used for wallclock time at 301/*
257 * initial boot and as a rough time source if the TSC isn't available. */ 302 * This routine supplies the Guest with time: it's used for wallclock time at
303 * initial boot and as a rough time source if the TSC isn't available.
304 */
258void write_timestamp(struct lg_cpu *cpu) 305void write_timestamp(struct lg_cpu *cpu)
259{ 306{
260 struct timespec now; 307 struct timespec now;
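The do_async_hcalls() comments above describe a simple ring: a status array in the Guest's "struct lguest_data" where 0xFF marks an empty slot, and a next_hcall cursor that wraps so calls are serviced in the order the Guest queued them. The toy model below (userspace C, made-up sizes and names) mirrors only that cursor logic, not the copy-from-Guest or kill_guest() handling.

/* Toy model of the async hypercall ring cursor walked by do_async_hcalls(). */
#include <stdio.h>
#include <string.h>

#define RING_SIZE	4
#define SLOT_EMPTY	0xFF

struct toy_ring {
	unsigned char st[RING_SIZE];	/* per-slot status, 0xFF = empty */
	unsigned int next;		/* cursor, persists across passes */
};

static void consume(struct toy_ring *r)
{
	unsigned int i;

	/* Walk the ring at most once per pass, like the kernel loop. */
	for (i = 0; i < RING_SIZE; i++) {
		unsigned int n = r->next;

		if (r->st[n] == SLOT_EMPTY)
			break;			/* nothing more queued */

		if (++r->next == RING_SIZE)	/* advance and wrap the cursor */
			r->next = 0;

		printf("servicing call %u from slot %u\n", r->st[n], n);
		r->st[n] = SLOT_EMPTY;		/* mark the slot done */
	}
}

int main(void)
{
	struct toy_ring r;

	memset(r.st, SLOT_EMPTY, sizeof(r.st));
	r.next = 0;

	r.st[0] = 1;
	r.st[1] = 2;
	consume(&r);	/* services 1 then 2, leaves the cursor at slot 2 */

	r.st[2] = 3;
	consume(&r);	/* picks up where it left off */
	return 0;
}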
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 0e9067b0d507..18648180db02 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -1,4 +1,5 @@
1/*P:800 Interrupts (traps) are complicated enough to earn their own file. 1/*P:800
2 * Interrupts (traps) are complicated enough to earn their own file.
2 * There are three classes of interrupts: 3 * There are three classes of interrupts:
3 * 4 *
4 * 1) Real hardware interrupts which occur while we're running the Guest, 5 * 1) Real hardware interrupts which occur while we're running the Guest,
@@ -10,7 +11,8 @@
10 * just like real hardware would deliver them. Traps from the Guest can be set 11 * just like real hardware would deliver them. Traps from the Guest can be set
11 * up to go directly back into the Guest, but sometimes the Host wants to see 12 * up to go directly back into the Guest, but sometimes the Host wants to see
12 * them first, so we also have a way of "reflecting" them into the Guest as if 13 * them first, so we also have a way of "reflecting" them into the Guest as if
13 * they had been delivered to it directly. :*/ 14 * they had been delivered to it directly.
15:*/
14#include <linux/uaccess.h> 16#include <linux/uaccess.h>
15#include <linux/interrupt.h> 17#include <linux/interrupt.h>
16#include <linux/module.h> 18#include <linux/module.h>
@@ -26,8 +28,10 @@ static unsigned long idt_address(u32 lo, u32 hi)
26 return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); 28 return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
27} 29}
28 30
29/* The "type" of the interrupt handler is a 4 bit field: we only support a 31/*
30 * couple of types. */ 32 * The "type" of the interrupt handler is a 4 bit field: we only support a
33 * couple of types.
34 */
31static int idt_type(u32 lo, u32 hi) 35static int idt_type(u32 lo, u32 hi)
32{ 36{
33 return (hi >> 8) & 0xF; 37 return (hi >> 8) & 0xF;
@@ -39,8 +43,10 @@ static bool idt_present(u32 lo, u32 hi)
39 return (hi & 0x8000); 43 return (hi & 0x8000);
40} 44}
41 45
42/* We need a helper to "push" a value onto the Guest's stack, since that's a 46/*
43 * big part of what delivering an interrupt does. */ 47 * We need a helper to "push" a value onto the Guest's stack, since that's a
48 * big part of what delivering an interrupt does.
49 */
44static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) 50static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
45{ 51{
46 /* Stack grows upwards: move stack then write value. */ 52 /* Stack grows upwards: move stack then write value. */
@@ -48,7 +54,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
48 lgwrite(cpu, *gstack, u32, val); 54 lgwrite(cpu, *gstack, u32, val);
49} 55}
50 56
51/*H:210 The set_guest_interrupt() routine actually delivers the interrupt or 57/*H:210
58 * The set_guest_interrupt() routine actually delivers the interrupt or
52 * trap. The mechanics of delivering traps and interrupts to the Guest are the 59 * trap. The mechanics of delivering traps and interrupts to the Guest are the
53 * same, except some traps have an "error code" which gets pushed onto the 60 * same, except some traps have an "error code" which gets pushed onto the
54 * stack as well: the caller tells us if this is one. 61 * stack as well: the caller tells us if this is one.
@@ -59,7 +66,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
59 * 66 *
60 * We set up the stack just like the CPU does for a real interrupt, so it's 67 * We set up the stack just like the CPU does for a real interrupt, so it's
61 * identical for the Guest (and the standard "iret" instruction will undo 68 * identical for the Guest (and the standard "iret" instruction will undo
62 * it). */ 69 * it).
70 */
63static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, 71static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
64 bool has_err) 72 bool has_err)
65{ 73{
@@ -67,20 +75,26 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
67 u32 eflags, ss, irq_enable; 75 u32 eflags, ss, irq_enable;
68 unsigned long virtstack; 76 unsigned long virtstack;
69 77
70 /* There are two cases for interrupts: one where the Guest is already 78 /*
79 * There are two cases for interrupts: one where the Guest is already
71 * in the kernel, and a more complex one where the Guest is in 80 * in the kernel, and a more complex one where the Guest is in
72 * userspace. We check the privilege level to find out. */ 81 * userspace. We check the privilege level to find out.
82 */
73 if ((cpu->regs->ss&0x3) != GUEST_PL) { 83 if ((cpu->regs->ss&0x3) != GUEST_PL) {
74 /* The Guest told us their kernel stack with the SET_STACK 84 /*
75 * hypercall: both the virtual address and the segment */ 85 * The Guest told us their kernel stack with the SET_STACK
86 * hypercall: both the virtual address and the segment.
87 */
76 virtstack = cpu->esp1; 88 virtstack = cpu->esp1;
77 ss = cpu->ss1; 89 ss = cpu->ss1;
78 90
79 origstack = gstack = guest_pa(cpu, virtstack); 91 origstack = gstack = guest_pa(cpu, virtstack);
80 /* We push the old stack segment and pointer onto the new 92 /*
93 * We push the old stack segment and pointer onto the new
81 * stack: when the Guest does an "iret" back from the interrupt 94 * stack: when the Guest does an "iret" back from the interrupt
82 * handler the CPU will notice they're dropping privilege 95 * handler the CPU will notice they're dropping privilege
83 * levels and expect these here. */ 96 * levels and expect these here.
97 */
84 push_guest_stack(cpu, &gstack, cpu->regs->ss); 98 push_guest_stack(cpu, &gstack, cpu->regs->ss);
85 push_guest_stack(cpu, &gstack, cpu->regs->esp); 99 push_guest_stack(cpu, &gstack, cpu->regs->esp);
86 } else { 100 } else {
@@ -91,18 +105,22 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
91 origstack = gstack = guest_pa(cpu, virtstack); 105 origstack = gstack = guest_pa(cpu, virtstack);
92 } 106 }
93 107
94 /* Remember that we never let the Guest actually disable interrupts, so 108 /*
109 * Remember that we never let the Guest actually disable interrupts, so
95 * the "Interrupt Flag" bit is always set. We copy that bit from the 110 * the "Interrupt Flag" bit is always set. We copy that bit from the
96 * Guest's "irq_enabled" field into the eflags word: we saw the Guest 111 * Guest's "irq_enabled" field into the eflags word: we saw the Guest
97 * copy it back in "lguest_iret". */ 112 * copy it back in "lguest_iret".
113 */
98 eflags = cpu->regs->eflags; 114 eflags = cpu->regs->eflags;
99 if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 115 if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
100 && !(irq_enable & X86_EFLAGS_IF)) 116 && !(irq_enable & X86_EFLAGS_IF))
101 eflags &= ~X86_EFLAGS_IF; 117 eflags &= ~X86_EFLAGS_IF;
102 118
103 /* An interrupt is expected to push three things on the stack: the old 119 /*
120 * An interrupt is expected to push three things on the stack: the old
104 * "eflags" word, the old code segment, and the old instruction 121 * "eflags" word, the old code segment, and the old instruction
105 * pointer. */ 122 * pointer.
123 */
106 push_guest_stack(cpu, &gstack, eflags); 124 push_guest_stack(cpu, &gstack, eflags);
107 push_guest_stack(cpu, &gstack, cpu->regs->cs); 125 push_guest_stack(cpu, &gstack, cpu->regs->cs);
108 push_guest_stack(cpu, &gstack, cpu->regs->eip); 126 push_guest_stack(cpu, &gstack, cpu->regs->eip);
@@ -111,15 +129,19 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
111 if (has_err) 129 if (has_err)
112 push_guest_stack(cpu, &gstack, cpu->regs->errcode); 130 push_guest_stack(cpu, &gstack, cpu->regs->errcode);
113 131
114 /* Now we've pushed all the old state, we change the stack, the code 132 /*
115 * segment and the address to execute. */ 133 * Now we've pushed all the old state, we change the stack, the code
134 * segment and the address to execute.
135 */
116 cpu->regs->ss = ss; 136 cpu->regs->ss = ss;
117 cpu->regs->esp = virtstack + (gstack - origstack); 137 cpu->regs->esp = virtstack + (gstack - origstack);
118 cpu->regs->cs = (__KERNEL_CS|GUEST_PL); 138 cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
119 cpu->regs->eip = idt_address(lo, hi); 139 cpu->regs->eip = idt_address(lo, hi);
120 140
121 /* There are two kinds of interrupt handlers: 0xE is an "interrupt 141 /*
122 * gate" which expects interrupts to be disabled on entry. */ 142 * There are two kinds of interrupt handlers: 0xE is an "interrupt
143 * gate" which expects interrupts to be disabled on entry.
144 */
123 if (idt_type(lo, hi) == 0xE) 145 if (idt_type(lo, hi) == 0xE)
124 if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) 146 if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
125 kill_guest(cpu, "Disabling interrupts"); 147 kill_guest(cpu, "Disabling interrupts");
@@ -130,7 +152,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
130 * 152 *
131 * interrupt_pending() returns the first pending interrupt which isn't blocked 153 * interrupt_pending() returns the first pending interrupt which isn't blocked
132 * by the Guest. It is called before every entry to the Guest, and just before 154 * by the Guest. It is called before every entry to the Guest, and just before
133 * we go to sleep when the Guest has halted itself. */ 155 * we go to sleep when the Guest has halted itself.
156 */
134unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) 157unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
135{ 158{
136 unsigned int irq; 159 unsigned int irq;
@@ -140,8 +163,10 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
140 if (!cpu->lg->lguest_data) 163 if (!cpu->lg->lguest_data)
141 return LGUEST_IRQS; 164 return LGUEST_IRQS;
142 165
143 /* Take our "irqs_pending" array and remove any interrupts the Guest 166 /*
144 * wants blocked: the result ends up in "blk". */ 167 * Take our "irqs_pending" array and remove any interrupts the Guest
168 * wants blocked: the result ends up in "blk".
169 */
145 if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, 170 if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
146 sizeof(blk))) 171 sizeof(blk)))
147 return LGUEST_IRQS; 172 return LGUEST_IRQS;
@@ -154,16 +179,20 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
154 return irq; 179 return irq;
155} 180}
156 181
157/* This actually diverts the Guest to running an interrupt handler, once an 182/*
158 * interrupt has been identified by interrupt_pending(). */ 183 * This actually diverts the Guest to running an interrupt handler, once an
184 * interrupt has been identified by interrupt_pending().
185 */
159void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) 186void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
160{ 187{
161 struct desc_struct *idt; 188 struct desc_struct *idt;
162 189
163 BUG_ON(irq >= LGUEST_IRQS); 190 BUG_ON(irq >= LGUEST_IRQS);
164 191
165 /* They may be in the middle of an iret, where they asked us never to 192 /*
166 * deliver interrupts. */ 193 * They may be in the middle of an iret, where they asked us never to
194 * deliver interrupts.
195 */
167 if (cpu->regs->eip >= cpu->lg->noirq_start && 196 if (cpu->regs->eip >= cpu->lg->noirq_start &&
168 (cpu->regs->eip < cpu->lg->noirq_end)) 197 (cpu->regs->eip < cpu->lg->noirq_end))
169 return; 198 return;
@@ -187,29 +216,37 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
187 } 216 }
188 } 217 }
189 218
190 /* Look at the IDT entry the Guest gave us for this interrupt. The 219 /*
220 * Look at the IDT entry the Guest gave us for this interrupt. The
191 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip 221 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
192 * over them. */ 222 * over them.
223 */
193 idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; 224 idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
194 /* If they don't have a handler (yet?), we just ignore it */ 225 /* If they don't have a handler (yet?), we just ignore it */
195 if (idt_present(idt->a, idt->b)) { 226 if (idt_present(idt->a, idt->b)) {
196 /* OK, mark it no longer pending and deliver it. */ 227 /* OK, mark it no longer pending and deliver it. */
197 clear_bit(irq, cpu->irqs_pending); 228 clear_bit(irq, cpu->irqs_pending);
198 /* set_guest_interrupt() takes the interrupt descriptor and a 229 /*
230 * set_guest_interrupt() takes the interrupt descriptor and a
199 * flag to say whether this interrupt pushes an error code onto 231 * flag to say whether this interrupt pushes an error code onto
200 * the stack as well: virtual interrupts never do. */ 232 * the stack as well: virtual interrupts never do.
233 */
201 set_guest_interrupt(cpu, idt->a, idt->b, false); 234 set_guest_interrupt(cpu, idt->a, idt->b, false);
202 } 235 }
203 236
204 /* Every time we deliver an interrupt, we update the timestamp in the 237 /*
238 * Every time we deliver an interrupt, we update the timestamp in the
205 * Guest's lguest_data struct. It would be better for the Guest if we 239 * Guest's lguest_data struct. It would be better for the Guest if we
206 * did this more often, but it can actually be quite slow: doing it 240 * did this more often, but it can actually be quite slow: doing it
207 * here is a compromise which means at least it gets updated every 241 * here is a compromise which means at least it gets updated every
208 * timer interrupt. */ 242 * timer interrupt.
243 */
209 write_timestamp(cpu); 244 write_timestamp(cpu);
210 245
211 /* If there are no other interrupts we want to deliver, clear 246 /*
212 * the pending flag. */ 247 * If there are no other interrupts we want to deliver, clear
248 * the pending flag.
249 */
213 if (!more) 250 if (!more)
214 put_user(0, &cpu->lg->lguest_data->irq_pending); 251 put_user(0, &cpu->lg->lguest_data->irq_pending);
215} 252}
@@ -217,24 +254,29 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
217/* And this is the routine when we want to set an interrupt for the Guest. */ 254/* And this is the routine when we want to set an interrupt for the Guest. */
218void set_interrupt(struct lg_cpu *cpu, unsigned int irq) 255void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
219{ 256{
220 /* Next time the Guest runs, the core code will see if it can deliver 257 /*
221 * this interrupt. */ 258 * Next time the Guest runs, the core code will see if it can deliver
259 * this interrupt.
260 */
222 set_bit(irq, cpu->irqs_pending); 261 set_bit(irq, cpu->irqs_pending);
223 262
224 /* Make sure it sees it; it might be asleep (eg. halted), or 263 /*
225 * running the Guest right now, in which case kick_process() 264 * Make sure it sees it; it might be asleep (eg. halted), or running
226 * will knock it out. */ 265 * the Guest right now, in which case kick_process() will knock it out.
266 */
227 if (!wake_up_process(cpu->tsk)) 267 if (!wake_up_process(cpu->tsk))
228 kick_process(cpu->tsk); 268 kick_process(cpu->tsk);
229} 269}
230/*:*/ 270/*:*/
231 271
232/* Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent 272/*
273 * Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent
233 * me a patch, so we support that too. It'd be a big step for lguest if half 274 * me a patch, so we support that too. It'd be a big step for lguest if half
234 * the Plan 9 user base were to start using it. 275 * the Plan 9 user base were to start using it.
235 * 276 *
236 * Actually now I think of it, it's possible that Ron *is* half the Plan 9 277 * Actually now I think of it, it's possible that Ron *is* half the Plan 9
237 * userbase. Oh well. */ 278 * userbase. Oh well.
279 */
238static bool could_be_syscall(unsigned int num) 280static bool could_be_syscall(unsigned int num)
239{ 281{
240 /* Normal Linux SYSCALL_VECTOR or reserved vector? */ 282 /* Normal Linux SYSCALL_VECTOR or reserved vector? */
@@ -274,9 +316,11 @@ void free_interrupts(void)
274 clear_bit(syscall_vector, used_vectors); 316 clear_bit(syscall_vector, used_vectors);
275} 317}
276 318
277/*H:220 Now we've got the routines to deliver interrupts, delivering traps like 319/*H:220
320 * Now we've got the routines to deliver interrupts, delivering traps like
278 * page fault is easy. The only trick is that Intel decided that some traps 321 * page fault is easy. The only trick is that Intel decided that some traps
279 * should have error codes: */ 322 * should have error codes:
323 */
280static bool has_err(unsigned int trap) 324static bool has_err(unsigned int trap)
281{ 325{
282 return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); 326 return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
@@ -285,13 +329,17 @@ static bool has_err(unsigned int trap)
285/* deliver_trap() returns true if it could deliver the trap. */ 329/* deliver_trap() returns true if it could deliver the trap. */
286bool deliver_trap(struct lg_cpu *cpu, unsigned int num) 330bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
287{ 331{
288 /* Trap numbers are always 8 bit, but we set an impossible trap number 332 /*
289 * for traps inside the Switcher, so check that here. */ 333 * Trap numbers are always 8 bit, but we set an impossible trap number
334 * for traps inside the Switcher, so check that here.
335 */
290 if (num >= ARRAY_SIZE(cpu->arch.idt)) 336 if (num >= ARRAY_SIZE(cpu->arch.idt))
291 return false; 337 return false;
292 338
293 /* Early on the Guest hasn't set the IDT entries (or maybe it put a 339 /*
294 * bogus one in): if we fail here, the Guest will be killed. */ 340 * Early on the Guest hasn't set the IDT entries (or maybe it put a
341 * bogus one in): if we fail here, the Guest will be killed.
342 */
295 if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) 343 if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
296 return false; 344 return false;
297 set_guest_interrupt(cpu, cpu->arch.idt[num].a, 345 set_guest_interrupt(cpu, cpu->arch.idt[num].a,
@@ -299,7 +347,8 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
299 return true; 347 return true;
300} 348}
301 349
302/*H:250 Here's the hard part: returning to the Host every time a trap happens 350/*H:250
351 * Here's the hard part: returning to the Host every time a trap happens
303 * and then calling deliver_trap() and re-entering the Guest is slow. 352 * and then calling deliver_trap() and re-entering the Guest is slow.
304 * Particularly because Guest userspace system calls are traps (usually trap 353 * Particularly because Guest userspace system calls are traps (usually trap
305 * 128). 354 * 128).
@@ -311,69 +360,87 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
311 * the other hypervisors would beat it up at lunchtime. 360 * the other hypervisors would beat it up at lunchtime.
312 * 361 *
313 * This routine indicates if a particular trap number could be delivered 362 * This routine indicates if a particular trap number could be delivered
314 * directly. */ 363 * directly.
364 */
315static bool direct_trap(unsigned int num) 365static bool direct_trap(unsigned int num)
316{ 366{
317 /* Hardware interrupts don't go to the Guest at all (except system 367 /*
318 * call). */ 368 * Hardware interrupts don't go to the Guest at all (except system
369 * call).
370 */
319 if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) 371 if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
320 return false; 372 return false;
321 373
322 /* The Host needs to see page faults (for shadow paging and to save the 374 /*
375 * The Host needs to see page faults (for shadow paging and to save the
323 * fault address), general protection faults (in/out emulation) and 376 * fault address), general protection faults (in/out emulation) and
324 * device not available (TS handling), invalid opcode fault (kvm hcall), 377 * device not available (TS handling), invalid opcode fault (kvm hcall),
325 * and of course, the hypercall trap. */ 378 * and of course, the hypercall trap.
379 */
326 return num != 14 && num != 13 && num != 7 && 380 return num != 14 && num != 13 && num != 7 &&
327 num != 6 && num != LGUEST_TRAP_ENTRY; 381 num != 6 && num != LGUEST_TRAP_ENTRY;
328} 382}
329/*:*/ 383/*:*/
330 384
331/*M:005 The Guest has the ability to turn its interrupt gates into trap gates, 385/*M:005
386 * The Guest has the ability to turn its interrupt gates into trap gates,
332 * if it is careful. The Host will let trap gates go directly to the 387 * if it is careful. The Host will let trap gates go directly to the
333 * Guest, but the Guest needs the interrupts atomically disabled for an 388 * Guest, but the Guest needs the interrupts atomically disabled for an
334 * interrupt gate. It can do this by pointing the trap gate at instructions 389 * interrupt gate. It can do this by pointing the trap gate at instructions
335 * within noirq_start and noirq_end, where it can safely disable interrupts. */ 390 * within noirq_start and noirq_end, where it can safely disable interrupts.
391 */
336 392
337/*M:006 The Guests do not use the sysenter (fast system call) instruction, 393/*M:006
394 * The Guests do not use the sysenter (fast system call) instruction,
338 * because it's hardcoded to enter privilege level 0 and so can't go direct. 395 * because it's hardcoded to enter privilege level 0 and so can't go direct.
339 * It's about twice as fast as the older "int 0x80" system call, so it might 396 * It's about twice as fast as the older "int 0x80" system call, so it might
340 * still be worthwhile to handle it in the Switcher and lcall down to the 397 * still be worthwhile to handle it in the Switcher and lcall down to the
341 * Guest. The sysenter semantics are hairy tho: search for that keyword in 398 * Guest. The sysenter semantics are hairy tho: search for that keyword in
342 * entry.S :*/ 399 * entry.S
400:*/
343 401
344/*H:260 When we make traps go directly into the Guest, we need to make sure 402/*H:260
403 * When we make traps go directly into the Guest, we need to make sure
345 * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the 404 * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the
346 * CPU trying to deliver the trap will fault while trying to push the interrupt 405 * CPU trying to deliver the trap will fault while trying to push the interrupt
347 * words on the stack: this is called a double fault, and it forces us to kill 406 * words on the stack: this is called a double fault, and it forces us to kill
348 * the Guest. 407 * the Guest.
349 * 408 *
350 * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */ 409 * Which is deeply unfair, because (literally!) it wasn't the Guests' fault.
410 */
351void pin_stack_pages(struct lg_cpu *cpu) 411void pin_stack_pages(struct lg_cpu *cpu)
352{ 412{
353 unsigned int i; 413 unsigned int i;
354 414
355 /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or 415 /*
356 * two pages of stack space. */ 416 * Depending on the CONFIG_4KSTACKS option, the Guest can have one or
417 * two pages of stack space.
418 */
357 for (i = 0; i < cpu->lg->stack_pages; i++) 419 for (i = 0; i < cpu->lg->stack_pages; i++)
358 /* The stack grows *upwards*, so the address we're given is the 420 /*
421 * The stack grows *upwards*, so the address we're given is the
359 * start of the page after the kernel stack. Subtract one to 422 * start of the page after the kernel stack. Subtract one to
360 * get back onto the first stack page, and keep subtracting to 423 * get back onto the first stack page, and keep subtracting to
361 * get to the rest of the stack pages. */ 424 * get to the rest of the stack pages.
425 */
362 pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); 426 pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
363} 427}
364 428
365/* Direct traps also mean that we need to know whenever the Guest wants to use 429/*
430 * Direct traps also mean that we need to know whenever the Guest wants to use
366 * a different kernel stack, so we can change the IDT entries to use that 431 * a different kernel stack, so we can change the IDT entries to use that
367 * stack. The IDT entries expect a virtual address, so unlike most addresses 432 * stack. The IDT entries expect a virtual address, so unlike most addresses
368 * the Guest gives us, the "esp" (stack pointer) value here is virtual, not 433 * the Guest gives us, the "esp" (stack pointer) value here is virtual, not
369 * physical. 434 * physical.
370 * 435 *
371 * In Linux each process has its own kernel stack, so this happens a lot: we 436 * In Linux each process has its own kernel stack, so this happens a lot: we
372 * change stacks on each context switch. */ 437 * change stacks on each context switch.
438 */
373void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) 439void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
374{ 440{
375 /* You are not allowed have a stack segment with privilege level 0: bad 441 /*
376 * Guest! */ 442 * You're not allowed a stack segment with privilege level 0: bad Guest!
443 */
377 if ((seg & 0x3) != GUEST_PL) 444 if ((seg & 0x3) != GUEST_PL)
378 kill_guest(cpu, "bad stack segment %i", seg); 445 kill_guest(cpu, "bad stack segment %i", seg);
379 /* We only expect one or two stack pages. */ 446 /* We only expect one or two stack pages. */
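To make the address arithmetic in pin_stack_pages() concrete, here is a small standalone C sketch (not lguest code): the esp1 value and PAGE_SIZE are made-up stand-ins, and pin_page() is reduced to a printf. With esp1 just past the top of a two-page stack, the loop pins the page ending at esp1 - 1 and the page below it.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-in for the real pin_page(): just report which page the address hits. */
static void pin_page(unsigned long vaddr)
{
	printf("pin page containing %#lx (page base %#lx)\n",
	       vaddr, vaddr & ~(PAGE_SIZE - 1));
}

int main(void)
{
	unsigned long esp1 = 0xc0402000;	/* hypothetical: just past the stack top */
	unsigned int stack_pages = 2;		/* as with CONFIG_4KSTACKS disabled */
	unsigned int i;

	for (i = 0; i < stack_pages; i++)
		pin_page(esp1 - 1 - i * PAGE_SIZE);
	return 0;
}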
@@ -387,11 +454,15 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
387 pin_stack_pages(cpu); 454 pin_stack_pages(cpu);
388} 455}
389 456
390/* All this reference to mapping stacks leads us neatly into the other complex 457/*
391 * part of the Host: page table handling. */ 458 * All this reference to mapping stacks leads us neatly into the other complex
459 * part of the Host: page table handling.
460 */
392 461
393/*H:235 This is the routine which actually checks the Guest's IDT entry and 462/*H:235
394 * transfers it into the entry in "struct lguest": */ 463 * This is the routine which actually checks the Guest's IDT entry and
464 * transfers it into the entry in "struct lguest":
465 */
395static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, 466static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
396 unsigned int num, u32 lo, u32 hi) 467 unsigned int num, u32 lo, u32 hi)
397{ 468{
@@ -407,30 +478,38 @@ static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
407 if (type != 0xE && type != 0xF) 478 if (type != 0xE && type != 0xF)
408 kill_guest(cpu, "bad IDT type %i", type); 479 kill_guest(cpu, "bad IDT type %i", type);
409 480
410 /* We only copy the handler address, present bit, privilege level and 481 /*
482 * We only copy the handler address, present bit, privilege level and
411 * type. The privilege level controls where the trap can be triggered 483 * type. The privilege level controls where the trap can be triggered
412 * manually with an "int" instruction. This is usually GUEST_PL, 484 * manually with an "int" instruction. This is usually GUEST_PL,
413 * except for system calls which userspace can use. */ 485 * except for system calls which userspace can use.
486 */
414 trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); 487 trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
415 trap->b = (hi&0xFFFFEF00); 488 trap->b = (hi&0xFFFFEF00);
416} 489}
417 490
418/*H:230 While we're here, dealing with delivering traps and interrupts to the 491/*H:230
492 * While we're here, dealing with delivering traps and interrupts to the
419 * Guest, we might as well complete the picture: how the Guest tells us where 493 * Guest, we might as well complete the picture: how the Guest tells us where
420 * it wants them to go. This would be simple, except making traps fast 494 * it wants them to go. This would be simple, except making traps fast
421 * requires some tricks. 495 * requires some tricks.
422 * 496 *
423 * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the 497 * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
424 * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */ 498 * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here.
499 */
425void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) 500void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
426{ 501{
427 /* Guest never handles: NMI, doublefault, spurious interrupt or 502 /*
428 * hypercall. We ignore when it tries to set them. */ 503 * Guest never handles: NMI, doublefault, spurious interrupt or
504 * hypercall. We ignore when it tries to set them.
505 */
429 if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) 506 if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
430 return; 507 return;
431 508
432 /* Mark the IDT as changed: next time the Guest runs we'll know we have 509 /*
433 * to copy this again. */ 510 * Mark the IDT as changed: next time the Guest runs we'll know we have
511 * to copy this again.
512 */
434 cpu->changed |= CHANGED_IDT; 513 cpu->changed |= CHANGED_IDT;
435 514
436 /* Check that the Guest doesn't try to step outside the bounds. */ 515 /* Check that the Guest doesn't try to step outside the bounds. */
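The bit-twiddling in set_trap() above can be checked with a tiny standalone program. KERNEL_CS and GUEST_PL below are illustrative stand-ins for the kernel's __KERNEL_CS and GUEST_PL, and the lo/hi words describe a hypothetical Guest IDT entry; the masks are the same ones the patch applies.

#include <stdio.h>

#define KERNEL_CS 0x60	/* stand-in for __KERNEL_CS */
#define GUEST_PL  1	/* stand-in for GUEST_PL */

int main(void)
{
	/* Hypothetical Guest gate: selector 0x0008, handler 0xc01234ab, present, DPL 3, type 0xF. */
	unsigned int lo = (0x0008u << 16) | 0x34abu;
	unsigned int hi = 0xc012ef00u;

	/* Keep handler address, present bit, DPL and type; force our own selector. */
	unsigned int a = ((KERNEL_CS | GUEST_PL) << 16) | (lo & 0x0000FFFFu);
	unsigned int b = hi & 0xFFFFEF00u;

	printf("a = %#010x (selector %#x, handler low %#06x)\n", a, a >> 16, a & 0xFFFFu);
	printf("b = %#010x (handler high %#06x, flags %#06x)\n", b, b >> 16, b & 0xFFFFu);
	printf("handler = %#010x\n", (b & 0xFFFF0000u) | (a & 0x0000FFFFu));
	return 0;
}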
@@ -440,9 +519,11 @@ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
440 set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); 519 set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
441} 520}
442 521
443/* The default entry for each interrupt points into the Switcher routines which 522/*
523 * The default entry for each interrupt points into the Switcher routines which
444 * simply return to the Host. The run_guest() loop will then call 524 * simply return to the Host. The run_guest() loop will then call
445 * deliver_trap() to bounce it back into the Guest. */ 525 * deliver_trap() to bounce it back into the Guest.
526 */
446static void default_idt_entry(struct desc_struct *idt, 527static void default_idt_entry(struct desc_struct *idt,
447 int trap, 528 int trap,
448 const unsigned long handler, 529 const unsigned long handler,
@@ -451,13 +532,17 @@ static void default_idt_entry(struct desc_struct *idt,
451 /* A present interrupt gate. */ 532 /* A present interrupt gate. */
452 u32 flags = 0x8e00; 533 u32 flags = 0x8e00;
453 534
454 /* Set the privilege level on the entry for the hypercall: this allows 535 /*
455 * the Guest to use the "int" instruction to trigger it. */ 536 * Set the privilege level on the entry for the hypercall: this allows
537 * the Guest to use the "int" instruction to trigger it.
538 */
456 if (trap == LGUEST_TRAP_ENTRY) 539 if (trap == LGUEST_TRAP_ENTRY)
457 flags |= (GUEST_PL << 13); 540 flags |= (GUEST_PL << 13);
458 else if (base) 541 else if (base)
459 /* Copy priv. level from what Guest asked for. This allows 542 /*
460 * debug (int 3) traps from Guest userspace, for example. */ 543 * Copy privilege level from what Guest asked for. This allows
544 * debug (int 3) traps from Guest userspace, for example.
545 */
461 flags |= (base->b & 0x6000); 546 flags |= (base->b & 0x6000);
462 547
463 /* Now pack it into the IDT entry in its weird format. */ 548 /* Now pack it into the IDT entry in its weird format. */
@@ -475,16 +560,20 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
475 default_idt_entry(&state->guest_idt[i], i, def[i], NULL); 560 default_idt_entry(&state->guest_idt[i], i, def[i], NULL);
476} 561}
477 562
478/*H:240 We don't use the IDT entries in the "struct lguest" directly, instead 563/*H:240
564 * We don't use the IDT entries in the "struct lguest" directly, instead
479 * we copy them into the IDT which we've set up for Guests on this CPU, just 565 * we copy them into the IDT which we've set up for Guests on this CPU, just
480 * before we run the Guest. This routine does that copy. */ 566 * before we run the Guest. This routine does that copy.
567 */
481void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, 568void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
482 const unsigned long *def) 569 const unsigned long *def)
483{ 570{
484 unsigned int i; 571 unsigned int i;
485 572
486 /* We can simply copy the direct traps, otherwise we use the default 573 /*
487 * ones in the Switcher: they will return to the Host. */ 574 * We can simply copy the direct traps, otherwise we use the default
575 * ones in the Switcher: they will return to the Host.
576 */
488 for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { 577 for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
489 const struct desc_struct *gidt = &cpu->arch.idt[i]; 578 const struct desc_struct *gidt = &cpu->arch.idt[i];
490 579
@@ -492,14 +581,16 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
492 if (!direct_trap(i)) 581 if (!direct_trap(i))
493 continue; 582 continue;
494 583
495 /* Only trap gates (type 15) can go direct to the Guest. 584 /*
585 * Only trap gates (type 15) can go direct to the Guest.
496 * Interrupt gates (type 14) disable interrupts as they are 586 * Interrupt gates (type 14) disable interrupts as they are
497 * entered, which we never let the Guest do. Not present 587 * entered, which we never let the Guest do. Not present
498 * entries (type 0x0) also can't go direct, of course. 588 * entries (type 0x0) also can't go direct, of course.
499 * 589 *
500 * If it can't go direct, we still need to copy the priv. level: 590 * If it can't go direct, we still need to copy the priv. level:
501 * they might want to give userspace access to a software 591 * they might want to give userspace access to a software
502 * interrupt. */ 592 * interrupt.
593 */
503 if (idt_type(gidt->a, gidt->b) == 0xF) 594 if (idt_type(gidt->a, gidt->b) == 0xF)
504 idt[i] = *gidt; 595 idt[i] = *gidt;
505 else 596 else
@@ -518,7 +609,8 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
518 * the next timer interrupt (in nanoseconds). We use the high-resolution timer 609 * the next timer interrupt (in nanoseconds). We use the high-resolution timer
519 * infrastructure to set a callback at that time. 610 * infrastructure to set a callback at that time.
520 * 611 *
521 * 0 means "turn off the clock". */ 612 * 0 means "turn off the clock".
613 */
522void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) 614void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
523{ 615{
524 ktime_t expires; 616 ktime_t expires;
@@ -529,9 +621,11 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
529 return; 621 return;
530 } 622 }
531 623
532 /* We use wallclock time here, so the Guest might not be running for 624 /*
625 * We use wallclock time here, so the Guest might not be running for
533 * all the time between now and the timer interrupt it asked for. This 626 * all the time between now and the timer interrupt it asked for. This
534 * is almost always the right thing to do. */ 627 * is almost always the right thing to do.
628 */
535 expires = ktime_add_ns(ktime_get_real(), delta); 629 expires = ktime_add_ns(ktime_get_real(), delta);
536 hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); 630 hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
537} 631}
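The comment above (wallclock "now" plus the requested delta, armed as an absolute one-shot timer) has a close userspace analogue in timerfd. The sketch below is that analogue, not lguest code, and the 250 ms delta is arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	uint64_t delta = 250 * 1000 * 1000;	/* requested interval in ns (arbitrary) */
	uint64_t expirations;
	struct timespec now;
	struct itimerspec its = { 0 };		/* it_interval = 0 makes it one-shot */
	int fd;

	/* Absolute expiry = wallclock "now" + delta, like ktime_add_ns(ktime_get_real(), delta). */
	clock_gettime(CLOCK_REALTIME, &now);
	its.it_value.tv_sec  = now.tv_sec + (now.tv_nsec + delta) / 1000000000;
	its.it_value.tv_nsec = (now.tv_nsec + delta) % 1000000000;

	fd = timerfd_create(CLOCK_REALTIME, 0);
	if (fd < 0 || timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
		return 1;

	/* Block until the one-shot timer fires: the "timer interrupt". */
	if (read(fd, &expirations, sizeof(expirations)) != sizeof(expirations))
		return 1;
	printf("timer fired (~%llu ns later)\n", (unsigned long long)delta);
	close(fd);
	return 0;
}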
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d4e8979735cb..bc28745d05af 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -16,15 +16,13 @@
16void free_pagetables(void); 16void free_pagetables(void);
17int init_pagetables(struct page **switcher_page, unsigned int pages); 17int init_pagetables(struct page **switcher_page, unsigned int pages);
18 18
19struct pgdir 19struct pgdir {
20{
21 unsigned long gpgdir; 20 unsigned long gpgdir;
22 pgd_t *pgdir; 21 pgd_t *pgdir;
23}; 22};
24 23
25/* We have two pages shared with guests, per cpu. */ 24/* We have two pages shared with guests, per cpu. */
26struct lguest_pages 25struct lguest_pages {
27{
28 /* This is the stack page mapped rw in guest */ 26 /* This is the stack page mapped rw in guest */
29 char spare[PAGE_SIZE - sizeof(struct lguest_regs)]; 27 char spare[PAGE_SIZE - sizeof(struct lguest_regs)];
30 struct lguest_regs regs; 28 struct lguest_regs regs;
@@ -38,8 +36,6 @@ struct lguest_pages
38#define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */ 36#define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */
39#define CHANGED_ALL 3 37#define CHANGED_ALL 3
40 38
41struct lguest;
42
43struct lg_cpu { 39struct lg_cpu {
44 unsigned int id; 40 unsigned int id;
45 struct lguest *lg; 41 struct lguest *lg;
@@ -56,13 +52,13 @@ struct lg_cpu {
56 52
57 unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ 53 unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */
58 54
59 /* At end of a page shared mapped over lguest_pages in guest. */ 55 /* At end of a page shared mapped over lguest_pages in guest. */
60 unsigned long regs_page; 56 unsigned long regs_page;
61 struct lguest_regs *regs; 57 struct lguest_regs *regs;
62 58
63 struct lguest_pages *last_pages; 59 struct lguest_pages *last_pages;
64 60
65 int cpu_pgd; /* which pgd this cpu is currently using */ 61 int cpu_pgd; /* Which pgd this cpu is currently using */
66 62
67 /* If a hypercall was asked for, this points to the arguments. */ 63 /* If a hypercall was asked for, this points to the arguments. */
68 struct hcall_args *hcall; 64 struct hcall_args *hcall;
@@ -82,7 +78,7 @@ struct lg_cpu {
82 78
83struct lg_eventfd { 79struct lg_eventfd {
84 unsigned long addr; 80 unsigned long addr;
85 struct file *event; 81 struct eventfd_ctx *event;
86}; 82};
87 83
88struct lg_eventfd_map { 84struct lg_eventfd_map {
@@ -91,15 +87,17 @@ struct lg_eventfd_map {
91}; 87};
92 88
93/* The private info the thread maintains about the guest. */ 89/* The private info the thread maintains about the guest. */
94struct lguest 90struct lguest {
95{
96 struct lguest_data __user *lguest_data; 91 struct lguest_data __user *lguest_data;
97 struct lg_cpu cpus[NR_CPUS]; 92 struct lg_cpu cpus[NR_CPUS];
98 unsigned int nr_cpus; 93 unsigned int nr_cpus;
99 94
100 u32 pfn_limit; 95 u32 pfn_limit;
101 /* This provides the offset to the base of guest-physical 96
102 * memory in the Launcher. */ 97 /*
98 * This provides the offset to the base of guest-physical memory in the
99 * Launcher.
100 */
103 void __user *mem_base; 101 void __user *mem_base;
104 unsigned long kernel_address; 102 unsigned long kernel_address;
105 103
@@ -124,11 +122,13 @@ bool lguest_address_ok(const struct lguest *lg,
124void __lgread(struct lg_cpu *, void *, unsigned long, unsigned); 122void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
125void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); 123void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
126 124
127/*H:035 Using memory-copy operations like that is usually inconvient, so we 125/*H:035
126 * Using memory-copy operations like that is usually inconvenient, so we
128 * have the following helper macros which read and write a specific type (often 127 * have the following helper macros which read and write a specific type (often
129 * an unsigned long). 128 * an unsigned long).
130 * 129 *
131 * This reads into a variable of the given type then returns that. */ 130 * This reads into a variable of the given type then returns that.
131 */
132#define lgread(cpu, addr, type) \ 132#define lgread(cpu, addr, type) \
133 ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; }) 133 ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
134 134
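The lgread() macro above relies on a GCC statement expression to produce a typed value from a generic byte-copy helper. The following standalone analogue (not the kernel macro: guest_mem and fake_lgread() are invented for illustration) shows the same shape; it needs GCC or Clang for the ({ ... }) extension, as the kernel does.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Stand-in for __lgread(): copies out of a fake "guest memory" buffer. */
static unsigned char guest_mem[64] = { 0x78, 0x56, 0x34, 0x12 };

static void fake_lgread(void *dst, unsigned long addr, unsigned int len)
{
	memcpy(dst, guest_mem + addr, len);
}

/* Same shape as lgread(): a statement expression yielding a typed value. */
#define lgread(addr, type) \
	({ type _v; fake_lgread(&_v, (addr), sizeof(_v)); _v; })

int main(void)
{
	uint32_t v = lgread(0, uint32_t);
	printf("read %#x from guest address 0\n", v);
	return 0;
}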
@@ -142,9 +142,11 @@ void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
142 142
143int run_guest(struct lg_cpu *cpu, unsigned long __user *user); 143int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
144 144
145/* Helper macros to obtain the first 12 or the last 20 bits, this is only the 145/*
146 * Helper macros to obtain the first 12 or the last 20 bits, this is only the
146 * first step in the migration to the kernel types. pte_pfn is already defined 147 * first step in the migration to the kernel types. pte_pfn is already defined
147 * in the kernel. */ 148 * in the kernel.
149 */
148#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) 150#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
149#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) 151#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
150#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) 152#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
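As a standalone illustration of what these helpers extract, the snippet below splits a made-up 32-bit page-table entry into its frame number and low flag bits using the same masks and shifts.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* A made-up PTE value: frame 0x1234, present + writable + user bits set. */
	unsigned long pte = (0x1234UL << PAGE_SHIFT) | 0x7;

	printf("pfn   = %#lx\n", pte >> PAGE_SHIFT);	/* like pgd_pfn()   */
	printf("flags = %#lx\n", pte & ~PAGE_MASK);	/* like pgd_flags() */
	return 0;
}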
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index e082cdac88b4..b6200bc39b58 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -1,10 +1,12 @@
1/*P:050 Lguest guests use a very simple method to describe devices. It's a 1/*P:050
2 * Lguest guests use a very simple method to describe devices. It's a
2 * series of device descriptors contained just above the top of normal Guest 3 * series of device descriptors contained just above the top of normal Guest
3 * memory. 4 * memory.
4 * 5 *
5 * We use the standard "virtio" device infrastructure, which provides us with a 6 * We use the standard "virtio" device infrastructure, which provides us with a
6 * console, a network and a block driver. Each one expects some configuration 7 * console, a network and a block driver. Each one expects some configuration
7 * information and a "virtqueue" or two to send and receive data. :*/ 8 * information and a "virtqueue" or two to send and receive data.
9:*/
8#include <linux/init.h> 10#include <linux/init.h>
9#include <linux/bootmem.h> 11#include <linux/bootmem.h>
10#include <linux/lguest_launcher.h> 12#include <linux/lguest_launcher.h>
@@ -20,8 +22,10 @@
20/* The pointer to our (page) of device descriptions. */ 22/* The pointer to our (page) of device descriptions. */
21static void *lguest_devices; 23static void *lguest_devices;
22 24
23/* For Guests, device memory can be used as normal memory, so we cast away the 25/*
24 * __iomem to quieten sparse. */ 26 * For Guests, device memory can be used as normal memory, so we cast away the
27 * __iomem to quieten sparse.
28 */
25static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) 29static inline void *lguest_map(unsigned long phys_addr, unsigned long pages)
26{ 30{
27 return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); 31 return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages);
@@ -32,8 +36,10 @@ static inline void lguest_unmap(void *addr)
32 iounmap((__force void __iomem *)addr); 36 iounmap((__force void __iomem *)addr);
33} 37}
34 38
35/*D:100 Each lguest device is just a virtio device plus a pointer to its entry 39/*D:100
36 * in the lguest_devices page. */ 40 * Each lguest device is just a virtio device plus a pointer to its entry
41 * in the lguest_devices page.
42 */
37struct lguest_device { 43struct lguest_device {
38 struct virtio_device vdev; 44 struct virtio_device vdev;
39 45
@@ -41,9 +47,11 @@ struct lguest_device {
41 struct lguest_device_desc *desc; 47 struct lguest_device_desc *desc;
42}; 48};
43 49
44/* Since the virtio infrastructure hands us a pointer to the virtio_device all 50/*
51 * Since the virtio infrastructure hands us a pointer to the virtio_device all
45 * the time, it helps to have a curt macro to get a pointer to the struct 52 * the time, it helps to have a curt macro to get a pointer to the struct
46 * lguest_device it's enclosed in. */ 53 * lguest_device it's enclosed in.
54 */
47#define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) 55#define to_lgdev(vd) container_of(vd, struct lguest_device, vdev)
48 56
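to_lgdev() is the kernel's container_of() pattern: step back from a member pointer to the structure that embeds it. A userspace analogue looks like this; the fake_* types are invented stand-ins and container_of is re-derived from offsetof.

#include <stdio.h>
#include <stddef.h>

/* Userspace rendition of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_virtio_device { int index; };

struct fake_lguest_device {
	struct fake_virtio_device vdev;
	void *desc;
};

#define to_lgdev(vd) container_of(vd, struct fake_lguest_device, vdev)

int main(void)
{
	struct fake_lguest_device ldev = { .vdev = { .index = 3 }, .desc = NULL };
	struct fake_virtio_device *vd = &ldev.vdev;	/* what the core hands us */

	printf("recovered lguest_device at %p (expected %p)\n",
	       (void *)to_lgdev(vd), (void *)&ldev);
	return 0;
}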
49/*D:130 57/*D:130
@@ -55,7 +63,8 @@ struct lguest_device {
55 * the driver will look at them during setup. 63 * the driver will look at them during setup.
56 * 64 *
57 * A convenient routine to return the device's virtqueue config array: 65 * A convenient routine to return the device's virtqueue config array:
58 * immediately after the descriptor. */ 66 * immediately after the descriptor.
67 */
59static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) 68static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc)
60{ 69{
61 return (void *)(desc + 1); 70 return (void *)(desc + 1);
@@ -98,10 +107,12 @@ static u32 lg_get_features(struct virtio_device *vdev)
98 return features; 107 return features;
99} 108}
100 109
101/* The virtio core takes the features the Host offers, and copies the 110/*
102 * ones supported by the driver into the vdev->features array. Once 111 * The virtio core takes the features the Host offers, and copies the ones
103 * that's all sorted out, this routine is called so we can tell the 112 * supported by the driver into the vdev->features array. Once that's all
104 * Host which features we understand and accept. */ 113 * sorted out, this routine is called so we can tell the Host which features we
114 * understand and accept.
115 */
105static void lg_finalize_features(struct virtio_device *vdev) 116static void lg_finalize_features(struct virtio_device *vdev)
106{ 117{
107 unsigned int i, bits; 118 unsigned int i, bits;
@@ -112,10 +123,11 @@ static void lg_finalize_features(struct virtio_device *vdev)
112 /* Give virtio_ring a chance to accept features. */ 123 /* Give virtio_ring a chance to accept features. */
113 vring_transport_features(vdev); 124 vring_transport_features(vdev);
114 125
115 /* The vdev->feature array is a Linux bitmask: this isn't the 126 /*
116 * same as a the simple array of bits used by lguest devices 127 * The vdev->features array is a Linux bitmask: this isn't the same as
117 * for features. So we do this slow, manual conversion which is 128 * the simple array of bits used by lguest devices for features. So we
118 * completely general. */ 129 * do this slow, manual conversion which is completely general.
130 */
119 memset(out_features, 0, desc->feature_len); 131 memset(out_features, 0, desc->feature_len);
120 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; 132 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
121 for (i = 0; i < bits; i++) { 133 for (i = 0; i < bits; i++) {
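The loop that follows in the full source walks each accepted feature bit and sets the corresponding bit in the lguest-style byte array (presumably by testing vdev->features bit by bit; the body is outside this hunk). The standalone sketch below performs that conversion on a made-up bitmask.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A hypothetical accepted-feature bitmask (Linux-bitmask style). */
	unsigned long features = (1UL << 0) | (1UL << 5) | (1UL << 24);
	unsigned char out_features[4];		/* lguest-style: one bit per feature */
	unsigned int i, bits = sizeof(out_features) * 8;

	memset(out_features, 0, sizeof(out_features));
	for (i = 0; i < bits; i++) {
		if (features & (1UL << i))
			out_features[i / 8] |= (1 << (i % 8));
	}

	for (i = 0; i < sizeof(out_features); i++)
		printf("out_features[%u] = %#x\n", i, out_features[i]);
	return 0;
}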
@@ -146,15 +158,19 @@ static void lg_set(struct virtio_device *vdev, unsigned int offset,
146 memcpy(lg_config(desc) + offset, buf, len); 158 memcpy(lg_config(desc) + offset, buf, len);
147} 159}
148 160
149/* The operations to get and set the status word just access the status field 161/*
150 * of the device descriptor. */ 162 * The operations to get and set the status word just access the status field
163 * of the device descriptor.
164 */
151static u8 lg_get_status(struct virtio_device *vdev) 165static u8 lg_get_status(struct virtio_device *vdev)
152{ 166{
153 return to_lgdev(vdev)->desc->status; 167 return to_lgdev(vdev)->desc->status;
154} 168}
155 169
156/* To notify on status updates, we (ab)use the NOTIFY hypercall, with the 170/*
157 * descriptor address of the device. A zero status means "reset". */ 171 * To notify on status updates, we (ab)use the NOTIFY hypercall, with the
172 * descriptor address of the device. A zero status means "reset".
173 */
158static void set_status(struct virtio_device *vdev, u8 status) 174static void set_status(struct virtio_device *vdev, u8 status)
159{ 175{
160 unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; 176 unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
@@ -191,8 +207,7 @@ static void lg_reset(struct virtio_device *vdev)
191 */ 207 */
192 208
193/*D:140 This is the information we remember about each virtqueue. */ 209/*D:140 This is the information we remember about each virtqueue. */
194struct lguest_vq_info 210struct lguest_vq_info {
195{
196 /* A copy of the information contained in the device config. */ 211 /* A copy of the information contained in the device config. */
197 struct lguest_vqconfig config; 212 struct lguest_vqconfig config;
198 213
@@ -200,13 +215,17 @@ struct lguest_vq_info
200 void *pages; 215 void *pages;
201}; 216};
202 217
203/* When the virtio_ring code wants to prod the Host, it calls us here and we 218/*
219 * When the virtio_ring code wants to prod the Host, it calls us here and we
204 * make a hypercall. We hand the physical address of the virtqueue so the Host 220 * make a hypercall. We hand the physical address of the virtqueue so the Host
205 * knows which virtqueue we're talking about. */ 221 * knows which virtqueue we're talking about.
222 */
206static void lg_notify(struct virtqueue *vq) 223static void lg_notify(struct virtqueue *vq)
207{ 224{
208 /* We store our virtqueue information in the "priv" pointer of the 225 /*
209 * virtqueue structure. */ 226 * We store our virtqueue information in the "priv" pointer of the
227 * virtqueue structure.
228 */
210 struct lguest_vq_info *lvq = vq->priv; 229 struct lguest_vq_info *lvq = vq->priv;
211 230
212 kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); 231 kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT);
@@ -215,7 +234,8 @@ static void lg_notify(struct virtqueue *vq)
215/* An extern declaration inside a C file is bad form. Don't do it. */ 234/* An extern declaration inside a C file is bad form. Don't do it. */
216extern void lguest_setup_irq(unsigned int irq); 235extern void lguest_setup_irq(unsigned int irq);
217 236
218/* This routine finds the first virtqueue described in the configuration of 237/*
238 * This routine finds the Nth virtqueue described in the configuration of
219 * this device and sets it up. 239 * this device and sets it up.
220 * 240 *
221 * This is kind of an ugly duckling. It'd be nicer to have a standard 241 * This is kind of an ugly duckling. It'd be nicer to have a standard
@@ -223,9 +243,7 @@ extern void lguest_setup_irq(unsigned int irq);
223 * everyone wants to do it differently. The KVM coders want the Guest to 243 * everyone wants to do it differently. The KVM coders want the Guest to
224 * allocate its own pages and tell the Host where they are, but for lguest it's 244 * allocate its own pages and tell the Host where they are, but for lguest it's
225 * simpler for the Host to simply tell us where the pages are. 245 * simpler for the Host to simply tell us where the pages are.
226 * 246 */
227 * So we provide drivers with a "find the Nth virtqueue and set it up"
228 * function. */
229static struct virtqueue *lg_find_vq(struct virtio_device *vdev, 247static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
230 unsigned index, 248 unsigned index,
231 void (*callback)(struct virtqueue *vq), 249 void (*callback)(struct virtqueue *vq),
@@ -244,9 +262,11 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
244 if (!lvq) 262 if (!lvq)
245 return ERR_PTR(-ENOMEM); 263 return ERR_PTR(-ENOMEM);
246 264
247 /* Make a copy of the "struct lguest_vqconfig" entry, which sits after 265 /*
266 * Make a copy of the "struct lguest_vqconfig" entry, which sits after
248 * the descriptor. We need a copy because the config space might not 267 * the descriptor. We need a copy because the config space might not
249 * be aligned correctly. */ 268 * be aligned correctly.
269 */
250 memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); 270 memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config));
251 271
252 printk("Mapping virtqueue %i addr %lx\n", index, 272 printk("Mapping virtqueue %i addr %lx\n", index,
@@ -261,8 +281,10 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
261 goto free_lvq; 281 goto free_lvq;
262 } 282 }
263 283
264 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size 284 /*
265 * and we've got a pointer to its pages. */ 285 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
286 * and we've got a pointer to its pages.
287 */
266 vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, 288 vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
267 vdev, lvq->pages, lg_notify, callback, name); 289 vdev, lvq->pages, lg_notify, callback, name);
268 if (!vq) { 290 if (!vq) {
@@ -273,18 +295,23 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
273 /* Make sure the interrupt is allocated. */ 295 /* Make sure the interrupt is allocated. */
274 lguest_setup_irq(lvq->config.irq); 296 lguest_setup_irq(lvq->config.irq);
275 297
276 /* Tell the interrupt for this virtqueue to go to the virtio_ring 298 /*
277 * interrupt handler. */ 299 * Tell the interrupt for this virtqueue to go to the virtio_ring
278 /* FIXME: We used to have a flag for the Host to tell us we could use 300 * interrupt handler.
301 *
302 * FIXME: We used to have a flag for the Host to tell us we could use
279 * the interrupt as a source of randomness: it'd be nice to have that 303 * the interrupt as a source of randomness: it'd be nice to have that
280 * back.. */ 304 * back.
305 */
281 err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, 306 err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
282 dev_name(&vdev->dev), vq); 307 dev_name(&vdev->dev), vq);
283 if (err) 308 if (err)
284 goto destroy_vring; 309 goto destroy_vring;
285 310
286 /* Last of all we hook up our 'struct lguest_vq_info" to the 311 /*
287 * virtqueue's priv pointer. */ 312 * Last of all we hook up our "struct lguest_vq_info" to the
313 * virtqueue's priv pointer.
314 */
288 vq->priv = lvq; 315 vq->priv = lvq;
289 return vq; 316 return vq;
290 317
@@ -358,11 +385,14 @@ static struct virtio_config_ops lguest_config_ops = {
358 .del_vqs = lg_del_vqs, 385 .del_vqs = lg_del_vqs,
359}; 386};
360 387
361/* The root device for the lguest virtio devices. This makes them appear as 388/*
362 * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. */ 389 * The root device for the lguest virtio devices. This makes them appear as
390 * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2.
391 */
363static struct device *lguest_root; 392static struct device *lguest_root;
364 393
365/*D:120 This is the core of the lguest bus: actually adding a new device. 394/*D:120
395 * This is the core of the lguest bus: actually adding a new device.
366 * It's a separate function because it's neater that way, and because an 396 * It's a separate function because it's neater that way, and because an
367 * earlier version of the code supported hotplug and unplug. They were removed 397 * earlier version of the code supported hotplug and unplug. They were removed
368 * early on because they were never used. 398 * early on because they were never used.
@@ -371,14 +401,14 @@ static struct device *lguest_root;
371 * 401 *
372 * It's worth reading this carefully: we start with a pointer to the new device 402 * It's worth reading this carefully: we start with a pointer to the new device
373 * descriptor in the "lguest_devices" page, and the offset into the device 403 * descriptor in the "lguest_devices" page, and the offset into the device
374 * descriptor page so we can uniquely identify it if things go badly wrong. */ 404 * descriptor page so we can uniquely identify it if things go badly wrong.
405 */
375static void add_lguest_device(struct lguest_device_desc *d, 406static void add_lguest_device(struct lguest_device_desc *d,
376 unsigned int offset) 407 unsigned int offset)
377{ 408{
378 struct lguest_device *ldev; 409 struct lguest_device *ldev;
379 410
380 /* Start with zeroed memory; Linux's device layer seems to count on 411 /* Start with zeroed memory; Linux's device layer counts on it. */
381 * it. */
382 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); 412 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
383 if (!ldev) { 413 if (!ldev) {
384 printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", 414 printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n",
@@ -388,17 +418,25 @@ static void add_lguest_device(struct lguest_device_desc *d,
388 418
389 /* This device's parent is the lguest/ dir. */ 419 /* This device's parent is the lguest/ dir. */
390 ldev->vdev.dev.parent = lguest_root; 420 ldev->vdev.dev.parent = lguest_root;
391 /* We have a unique device index thanks to the dev_index counter. */ 421 /*
422 * The device type comes straight from the descriptor. There's also a
423 * device vendor field in the virtio_device struct, which we leave as
424 * 0.
425 */
392 ldev->vdev.id.device = d->type; 426 ldev->vdev.id.device = d->type;
393 /* We have a simple set of routines for querying the device's 427 /*
394 * configuration information and setting its status. */ 428 * We have a simple set of routines for querying the device's
429 * configuration information and setting its status.
430 */
395 ldev->vdev.config = &lguest_config_ops; 431 ldev->vdev.config = &lguest_config_ops;
396 /* And we remember the device's descriptor for lguest_config_ops. */ 432 /* And we remember the device's descriptor for lguest_config_ops. */
397 ldev->desc = d; 433 ldev->desc = d;
398 434
399 /* register_virtio_device() sets up the generic fields for the struct 435 /*
436 * register_virtio_device() sets up the generic fields for the struct
400 * virtio_device and calls device_register(). This makes the bus 437 * virtio_device and calls device_register(). This makes the bus
401 * infrastructure look for a matching driver. */ 438 * infrastructure look for a matching driver.
439 */
402 if (register_virtio_device(&ldev->vdev) != 0) { 440 if (register_virtio_device(&ldev->vdev) != 0) {
403 printk(KERN_ERR "Failed to register lguest dev %u type %u\n", 441 printk(KERN_ERR "Failed to register lguest dev %u type %u\n",
404 offset, d->type); 442 offset, d->type);
@@ -406,8 +444,10 @@ static void add_lguest_device(struct lguest_device_desc *d,
406 } 444 }
407} 445}
408 446
409/*D:110 scan_devices() simply iterates through the device page. The type 0 is 447/*D:110
410 * reserved to mean "end of devices". */ 448 * scan_devices() simply iterates through the device page. The type 0 is
449 * reserved to mean "end of devices".
450 */
411static void scan_devices(void) 451static void scan_devices(void)
412{ 452{
413 unsigned int i; 453 unsigned int i;
@@ -426,7 +466,8 @@ static void scan_devices(void)
426 } 466 }
427} 467}
428 468
429/*D:105 Fairly early in boot, lguest_devices_init() is called to set up the 469/*D:105
470 * Fairly early in boot, lguest_devices_init() is called to set up the
430 * lguest device infrastructure. We check that we are a Guest by checking 471 * lguest device infrastructure. We check that we are a Guest by checking
431 * pv_info.name: there are other ways of checking, but this seems most 472 * pv_info.name: there are other ways of checking, but this seems most
432 * obvious to me. 473 * obvious to me.
@@ -437,7 +478,8 @@ static void scan_devices(void)
437 * correct sysfs incantation). 478 * correct sysfs incantation).
438 * 479 *
439 * Finally we call scan_devices() which adds all the devices found in the 480 * Finally we call scan_devices() which adds all the devices found in the
440 * lguest_devices page. */ 481 * lguest_devices page.
482 */
441static int __init lguest_devices_init(void) 483static int __init lguest_devices_init(void)
442{ 484{
443 if (strcmp(pv_info.name, "lguest") != 0) 485 if (strcmp(pv_info.name, "lguest") != 0)
@@ -456,11 +498,13 @@ static int __init lguest_devices_init(void)
456/* We do this after core stuff, but before the drivers. */ 498/* We do this after core stuff, but before the drivers. */
457postcore_initcall(lguest_devices_init); 499postcore_initcall(lguest_devices_init);
458 500
459/*D:150 At this point in the journey we used to now wade through the lguest 501/*D:150
502 * At this point in the journey we used to wade through the lguest
460 * devices themselves: net, block and console. Since they're all now virtio 503 * devices themselves: net, block and console. Since they're all now virtio
461 * devices rather than lguest-specific, I've decided to ignore them. Mostly, 504 * devices rather than lguest-specific, I've decided to ignore them. Mostly,
462 * they're kind of boring. But this does mean you'll never experience the 505 * they're kind of boring. But this does mean you'll never experience the
463 * thrill of reading the forbidden love scene buried deep in the block driver. 506 * thrill of reading the forbidden love scene buried deep in the block driver.
464 * 507 *
465 * "make Launcher" beckons, where we answer questions like "Where do Guests 508 * "make Launcher" beckons, where we answer questions like "Where do Guests
466 * come from?", and "What do you do when someone asks for optimization?". */ 509 * come from?", and "What do you do when someone asks for optimization?".
510 */
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 32e297121058..b4d3f7ca554f 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -1,8 +1,9 @@
1/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher 1/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
2 * controls and communicates with the Guest. For example, the first write will 2 * controls and communicates with the Guest. For example, the first write will
3 * tell us the Guest's memory layout, pagetable, entry point and kernel address 3 * tell us the Guest's memory layout and entry point. A read will run the
4 * offset. A read will run the Guest until something happens, such as a signal 4 * Guest until something happens, such as a signal or the Guest doing a NOTIFY
5 * or the Guest doing a NOTIFY out to the Launcher. :*/ 5 * out to the Launcher.
6:*/
6#include <linux/uaccess.h> 7#include <linux/uaccess.h>
7#include <linux/miscdevice.h> 8#include <linux/miscdevice.h>
8#include <linux/fs.h> 9#include <linux/fs.h>
@@ -11,14 +12,41 @@
11#include <linux/file.h> 12#include <linux/file.h>
12#include "lg.h" 13#include "lg.h"
13 14
15/*L:056
16 * Before we move on, let's jump ahead and look at what the kernel does when
17 * it needs to look up the eventfds. That will complete our picture of how we
18 * use RCU.
19 *
20 * The notification value is in cpu->pending_notify: we return true if it went
21 * to an eventfd.
22 */
14bool send_notify_to_eventfd(struct lg_cpu *cpu) 23bool send_notify_to_eventfd(struct lg_cpu *cpu)
15{ 24{
16 unsigned int i; 25 unsigned int i;
17 struct lg_eventfd_map *map; 26 struct lg_eventfd_map *map;
18 27
19 /* lg->eventfds is RCU-protected */ 28 /*
29 * This "rcu_read_lock()" helps track when someone is still looking at
30 * the (RCU-using) eventfds array. It's not actually a lock at all;
31 * indeed it's a noop in many configurations. (You didn't expect me to
32 * explain all the RCU secrets here, did you?)
33 */
20 rcu_read_lock(); 34 rcu_read_lock();
35 /*
36 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
37 * makes sure we don't access the memory pointed to by
38 * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy,
39 * but Alpha allows this! Paul McKenney points out that a really
40 * aggressive compiler could have the same effect:
41 * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
42 *
43 * So play safe, use rcu_dereference to get the rcu-protected pointer:
44 */
21 map = rcu_dereference(cpu->lg->eventfds); 45 map = rcu_dereference(cpu->lg->eventfds);
46 /*
47 * Simple array search: even if they add an eventfd while we do this,
48 * we'll continue to use the old array and just won't see the new one.
49 */
22 for (i = 0; i < map->num; i++) { 50 for (i = 0; i < map->num; i++) {
23 if (map->map[i].addr == cpu->pending_notify) { 51 if (map->map[i].addr == cpu->pending_notify) {
24 eventfd_signal(map->map[i].event, 1); 52 eventfd_signal(map->map[i].event, 1);
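The reader side of this RCU usage is small enough to sketch standalone. In the snippet below the RCU primitives are stubbed to no-ops so it compiles and runs single-threaded; in the kernel they provide the ordering and grace-period guarantees the comments above describe. The eventfd_entry/eventfd_map types are invented stand-ins for lg_eventfd/lg_eventfd_map.

#include <stdio.h>
#include <stdlib.h>

/* Stubbed RCU primitives: the real ones come from the kernel. */
#define rcu_read_lock()		do { } while (0)
#define rcu_read_unlock()	do { } while (0)
#define rcu_dereference(p)	(p)

struct eventfd_entry { unsigned long addr; int fd; };
struct eventfd_map   { unsigned int num; struct eventfd_entry map[]; };

static struct eventfd_map *eventfds;	/* published pointer, like lg->eventfds */

static int notify(unsigned long pending)
{
	struct eventfd_map *m;
	unsigned int i;
	int found = 0;

	rcu_read_lock();
	m = rcu_dereference(eventfds);		/* snapshot the current array */
	for (i = 0; i < m->num; i++) {
		if (m->map[i].addr == pending) {
			printf("would signal eventfd %d for %#lx\n",
			       m->map[i].fd, pending);
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

int main(void)
{
	eventfds = malloc(sizeof(*eventfds) + 2 * sizeof(struct eventfd_entry));
	eventfds->num = 2;
	eventfds->map[0] = (struct eventfd_entry){ 0x1000, 5 };
	eventfds->map[1] = (struct eventfd_entry){ 0x2000, 6 };

	printf("match: %d\n", notify(0x2000));
	free(eventfds);
	return 0;
}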
@@ -26,19 +54,50 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu)
26 break; 54 break;
27 } 55 }
28 } 56 }
57 /* We're done with the rcu-protected variable cpu->lg->eventfds. */
29 rcu_read_unlock(); 58 rcu_read_unlock();
59
60 /* If we cleared the notification, it's because we found a match. */
30 return cpu->pending_notify == 0; 61 return cpu->pending_notify == 0;
31} 62}
32 63
64/*L:055
65 * One of the more tricksy tricks in the Linux Kernel is a technique called
66 * Read Copy Update. Since one point of lguest is to teach lguest journeyers
67 * about kernel coding, I use it here. (In case you're curious, other purposes
68 * include learning about virtualization and instilling a deep appreciation for
69 * simplicity and puppies).
70 *
71 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
72 * add new eventfds without ever blocking readers from accessing the array.
73 * The current Launcher only does this during boot, so that never happens. But
74 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
75 * than this code does.
76 *
77 * We allocate a brand new one-larger array, copy the old one and add our new
78 * element. Then we make the lg eventfd pointer point to the new array.
79 * That's the easy part: now we need to free the old one, but we need to make
80 * sure no slow CPU somewhere is still looking at it. That's what
81 * synchronize_rcu does for us: waits until every CPU has indicated that it has
82 * moved on, so we know it's no longer using the old one.
83 *
84 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
85 */
33static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) 86static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
34{ 87{
35 struct lg_eventfd_map *new, *old = lg->eventfds; 88 struct lg_eventfd_map *new, *old = lg->eventfds;
36 89
90 /*
91 * We don't allow notifications on value 0 anyway (pending_notify of
92 * 0 means "nothing pending").
93 */
37 if (!addr) 94 if (!addr)
38 return -EINVAL; 95 return -EINVAL;
39 96
40 /* Replace the old array with the new one, carefully: others can 97 /*
41 * be accessing it at the same time */ 98 * Replace the old array with the new one, carefully: others can
99 * be accessing it at the same time.
100 */
42 new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), 101 new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
43 GFP_KERNEL); 102 GFP_KERNEL);
44 if (!new) 103 if (!new)
@@ -50,24 +109,43 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
50 109
51 /* Now append new entry. */ 110 /* Now append new entry. */
52 new->map[new->num].addr = addr; 111 new->map[new->num].addr = addr;
53 new->map[new->num].event = eventfd_fget(fd); 112 new->map[new->num].event = eventfd_ctx_fdget(fd);
54 if (IS_ERR(new->map[new->num].event)) { 113 if (IS_ERR(new->map[new->num].event)) {
114 int err = PTR_ERR(new->map[new->num].event);
55 kfree(new); 115 kfree(new);
56 return PTR_ERR(new->map[new->num].event); 116 return err;
57 } 117 }
58 new->num++; 118 new->num++;
59 119
60 /* Now put new one in place. */ 120 /*
121 * Now put new one in place: rcu_assign_pointer() is a fancy way of
122 * doing "lg->eventfds = new", but it uses memory barriers to make
123 * absolutely sure that the contents of "new" written above is nailed
124 * down before we actually do the assignment.
125 *
126 * We have to think about these kinds of things when we're operating on
127 * live data without locks.
128 */
61 rcu_assign_pointer(lg->eventfds, new); 129 rcu_assign_pointer(lg->eventfds, new);
62 130
63 /* We're not in a big hurry. Wait until noone's looking at old 131 /*
64 * version, then delete it. */ 132 * We're not in a big hurry. Wait until no one's looking at the old
133 * version, then free it.
134 */
65 synchronize_rcu(); 135 synchronize_rcu();
66 kfree(old); 136 kfree(old);
67 137
68 return 0; 138 return 0;
69} 139}
70 140
141/*L:052
142 * Receiving notifications from the Guest is usually done by attaching a
143 * particular LHCALL_NOTIFY value to an event filedescriptor. The eventfd will
144 * become readable when the Guest does an LHCALL_NOTIFY with that value.
145 *
146 * This is really convenient for processing each virtqueue in a separate
147 * thread.
148 */
71static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) 149static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
72{ 150{
73 unsigned long addr, fd; 151 unsigned long addr, fd;
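The writer side, the copy-and-publish pattern that add_eventfd() above uses, can be sketched the same way: stubbed RCU primitives, invented types, single-threaded. What matters is the order of operations — fill the new array, publish it, wait for old readers, then free the old one.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stubbed RCU primitives: the real ones come from the kernel. */
#define rcu_assign_pointer(p, v)	((p) = (v))
#define synchronize_rcu()		do { } while (0)

struct eventfd_entry { unsigned long addr; int fd; };
struct eventfd_map   { unsigned int num; struct eventfd_entry map[]; };

static struct eventfd_map *eventfds;

static int add_eventfd(unsigned long addr, int fd)
{
	struct eventfd_map *new, *old = eventfds;

	/* Allocate a one-larger copy and append the new entry... */
	new = malloc(sizeof(*new) + (old->num + 1) * sizeof(new->map[0]));
	if (!new)
		return -1;
	new->num = old->num;
	memcpy(new->map, old->map, old->num * sizeof(new->map[0]));
	new->map[new->num].addr = addr;
	new->map[new->num].fd = fd;
	new->num++;

	/* ...publish it, wait for old readers to finish, then free the old one. */
	rcu_assign_pointer(eventfds, new);
	synchronize_rcu();
	free(old);
	return 0;
}

int main(void)
{
	eventfds = calloc(1, sizeof(*eventfds));	/* start with an empty map */
	if (!eventfds)
		return 1;
	add_eventfd(0x1000, 5);
	add_eventfd(0x2000, 6);
	printf("map now has %u entries\n", eventfds->num);
	free(eventfds);
	return 0;
}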
@@ -79,15 +157,22 @@ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
79 if (get_user(fd, input) != 0) 157 if (get_user(fd, input) != 0)
80 return -EFAULT; 158 return -EFAULT;
81 159
160 /*
161 * Just make sure two callers don't add eventfds at once. We really
162 * only need to lock against callers adding to the same Guest, so using
163 * the Big Lguest Lock is overkill. But this is setup, not a fast path.
164 */
82 mutex_lock(&lguest_lock); 165 mutex_lock(&lguest_lock);
83 err = add_eventfd(lg, addr, fd); 166 err = add_eventfd(lg, addr, fd);
84 mutex_unlock(&lguest_lock); 167 mutex_unlock(&lguest_lock);
85 168
86 return 0; 169 return err;
87} 170}
88 171
89/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt 172/*L:050
90 * number to /dev/lguest. */ 173 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
174 * number to /dev/lguest.
175 */
91static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) 176static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
92{ 177{
93 unsigned long irq; 178 unsigned long irq;
@@ -97,12 +182,18 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
97 if (irq >= LGUEST_IRQS) 182 if (irq >= LGUEST_IRQS)
98 return -EINVAL; 183 return -EINVAL;
99 184
185 /*
186 * Next time the Guest runs, the core code will see if it can deliver
187 * this interrupt.
188 */
100 set_interrupt(cpu, irq); 189 set_interrupt(cpu, irq);
101 return 0; 190 return 0;
102} 191}
103 192
104/*L:040 Once our Guest is initialized, the Launcher makes it run by reading 193/*L:040
105 * from /dev/lguest. */ 194 * Once our Guest is initialized, the Launcher makes it run by reading
195 * from /dev/lguest.
196 */
106static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) 197static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
107{ 198{
108 struct lguest *lg = file->private_data; 199 struct lguest *lg = file->private_data;
@@ -138,8 +229,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
138 return len; 229 return len;
139 } 230 }
140 231
141 /* If we returned from read() last time because the Guest sent I/O, 232 /*
142 * clear the flag. */ 233 * If we returned from read() last time because the Guest sent I/O,
234 * clear the flag.
235 */
143 if (cpu->pending_notify) 236 if (cpu->pending_notify)
144 cpu->pending_notify = 0; 237 cpu->pending_notify = 0;
145 238
@@ -147,8 +240,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
147 return run_guest(cpu, (unsigned long __user *)user); 240 return run_guest(cpu, (unsigned long __user *)user);
148} 241}
149 242
150/*L:025 This actually initializes a CPU. For the moment, a Guest is only 243/*L:025
151 * uniprocessor, so "id" is always 0. */ 244 * This actually initializes a CPU. For the moment, a Guest is only
245 * uniprocessor, so "id" is always 0.
246 */
152static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) 247static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
153{ 248{
154 /* We have a limited number of CPUs in the lguest struct. */ 249 /* We have a limited number of CPUs in the lguest struct. */
@@ -163,8 +258,10 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
163 /* Each CPU has a timer it can set. */ 258 /* Each CPU has a timer it can set. */
164 init_clockdev(cpu); 259 init_clockdev(cpu);
165 260
166 /* We need a complete page for the Guest registers: they are accessible 261 /*
167 * to the Guest and we can only grant it access to whole pages. */ 262 * We need a complete page for the Guest registers: they are accessible
263 * to the Guest and we can only grant it access to whole pages.
264 */
168 cpu->regs_page = get_zeroed_page(GFP_KERNEL); 265 cpu->regs_page = get_zeroed_page(GFP_KERNEL);
169 if (!cpu->regs_page) 266 if (!cpu->regs_page)
170 return -ENOMEM; 267 return -ENOMEM;
@@ -172,29 +269,38 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
172 /* We actually put the registers at the bottom of the page. */ 269 /* We actually put the registers at the bottom of the page. */
173 cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); 270 cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);
174 271
175 /* Now we initialize the Guest's registers, handing it the start 272 /*
176 * address. */ 273 * Now we initialize the Guest's registers, handing it the start
274 * address.
275 */
177 lguest_arch_setup_regs(cpu, start_ip); 276 lguest_arch_setup_regs(cpu, start_ip);
178 277
179 /* We keep a pointer to the Launcher task (ie. current task) for when 278 /*
180 * other Guests want to wake this one (eg. console input). */ 279 * We keep a pointer to the Launcher task (ie. current task) for when
280 * other Guests want to wake this one (eg. console input).
281 */
181 cpu->tsk = current; 282 cpu->tsk = current;
182 283
183 /* We need to keep a pointer to the Launcher's memory map, because if 284 /*
285 * We need to keep a pointer to the Launcher's memory map, because if
184 * the Launcher dies we need to clean it up. If we don't keep a 286 * the Launcher dies we need to clean it up. If we don't keep a
185 * reference, it is destroyed before close() is called. */ 287 * reference, it is destroyed before close() is called.
288 */
186 cpu->mm = get_task_mm(cpu->tsk); 289 cpu->mm = get_task_mm(cpu->tsk);
187 290
188 /* We remember which CPU's pages this Guest used last, for optimization 291 /*
189 * when the same Guest runs on the same CPU twice. */ 292 * We remember which CPU's pages this Guest used last, for optimization
293 * when the same Guest runs on the same CPU twice.
294 */
190 cpu->last_pages = NULL; 295 cpu->last_pages = NULL;
191 296
192 /* No error == success. */ 297 /* No error == success. */
193 return 0; 298 return 0;
194} 299}
195 300
196/*L:020 The initialization write supplies 3 pointer sized (32 or 64 bit) 301/*L:020
197 * values (in addition to the LHREQ_INITIALIZE value). These are: 302 * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in
303 * addition to the LHREQ_INITIALIZE value). These are:
198 * 304 *
199 * base: The start of the Guest-physical memory inside the Launcher memory. 305 * base: The start of the Guest-physical memory inside the Launcher memory.
200 * 306 *
@@ -206,14 +312,15 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
206 */ 312 */
207static int initialize(struct file *file, const unsigned long __user *input) 313static int initialize(struct file *file, const unsigned long __user *input)
208{ 314{
209 /* "struct lguest" contains everything we (the Host) know about a 315 /* "struct lguest" contains all we (the Host) know about a Guest. */
210 * Guest. */
211 struct lguest *lg; 316 struct lguest *lg;
212 int err; 317 int err;
213 unsigned long args[3]; 318 unsigned long args[3];
214 319
215 /* We grab the Big Lguest lock, which protects against multiple 320 /*
216 * simultaneous initializations. */ 321 * We grab the Big Lguest lock, which protects against multiple
322 * simultaneous initializations.
323 */
217 mutex_lock(&lguest_lock); 324 mutex_lock(&lguest_lock);
218 /* You can't initialize twice! Close the device and start again... */ 325 /* You can't initialize twice! Close the device and start again... */
219 if (file->private_data) { 326 if (file->private_data) {
@@ -248,8 +355,10 @@ static int initialize(struct file *file, const unsigned long __user *input)
248 if (err) 355 if (err)
249 goto free_eventfds; 356 goto free_eventfds;
250 357
251 /* Initialize the Guest's shadow page tables, using the toplevel 358 /*
252 * address the Launcher gave us. This allocates memory, so can fail. */ 359 * Initialize the Guest's shadow page tables, using the toplevel
360 * address the Launcher gave us. This allocates memory, so can fail.
361 */
253 err = init_guest_pagetable(lg); 362 err = init_guest_pagetable(lg);
254 if (err) 363 if (err)
255 goto free_regs; 364 goto free_regs;
@@ -274,20 +383,24 @@ unlock:
274 return err; 383 return err;
275} 384}
276 385
277/*L:010 The first operation the Launcher does must be a write. All writes 386/*L:010
387 * The first operation the Launcher does must be a write. All writes
278 * start with an unsigned long number: for the first write this must be 388 * start with an unsigned long number: for the first write this must be
279 * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use 389 * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
280 * writes of other values to send interrupts. 390 * writes of other values to send interrupts or set up receipt of notifications.
281 * 391 *
282 * Note that we overload the "offset" in the /dev/lguest file to indicate what 392 * Note that we overload the "offset" in the /dev/lguest file to indicate what
283 * CPU number we're dealing with. Currently this is always 0, since we only 393 * CPU number we're dealing with. Currently this is always 0 since we only
284 * support uniprocessor Guests, but you can see the beginnings of SMP support 394 * support uniprocessor Guests, but you can see the beginnings of SMP support
285 * here. */ 395 * here.
396 */
286static ssize_t write(struct file *file, const char __user *in, 397static ssize_t write(struct file *file, const char __user *in,
287 size_t size, loff_t *off) 398 size_t size, loff_t *off)
288{ 399{
289 /* Once the Guest is initialized, we hold the "struct lguest" in the 400 /*
290 * file private data. */ 401 * Once the Guest is initialized, we hold the "struct lguest" in the
402 * file private data.
403 */
291 struct lguest *lg = file->private_data; 404 struct lguest *lg = file->private_data;
292 const unsigned long __user *input = (const unsigned long __user *)in; 405 const unsigned long __user *input = (const unsigned long __user *)in;
293 unsigned long req; 406 unsigned long req;
@@ -322,13 +435,15 @@ static ssize_t write(struct file *file, const char __user *in,
322 } 435 }
323} 436}
324 437
325/*L:060 The final piece of interface code is the close() routine. It reverses 438/*L:060
439 * The final piece of interface code is the close() routine. It reverses
326 * everything done in initialize(). This is usually called because the 440 * everything done in initialize(). This is usually called because the
327 * Launcher exited. 441 * Launcher exited.
328 * 442 *
329 * Note that the close routine returns 0 or a negative error number: it can't 443 * Note that the close routine returns 0 or a negative error number: it can't
330 * really fail, but it can whine. I blame Sun for this wart, and K&R C for 444 * really fail, but it can whine. I blame Sun for this wart, and K&R C for
331 * letting them do it. :*/ 445 * letting them do it.
446:*/
332static int close(struct inode *inode, struct file *file) 447static int close(struct inode *inode, struct file *file)
333{ 448{
334 struct lguest *lg = file->private_data; 449 struct lguest *lg = file->private_data;
@@ -338,8 +453,10 @@ static int close(struct inode *inode, struct file *file)
338 if (!lg) 453 if (!lg)
339 return 0; 454 return 0;
340 455
341 /* We need the big lock, to protect from inter-guest I/O and other 456 /*
342 * Launchers initializing guests. */ 457 * We need the big lock, to protect from inter-guest I/O and other
458 * Launchers initializing guests.
459 */
343 mutex_lock(&lguest_lock); 460 mutex_lock(&lguest_lock);
344 461
345 /* Free up the shadow page tables for the Guest. */ 462 /* Free up the shadow page tables for the Guest. */
@@ -350,18 +467,22 @@ static int close(struct inode *inode, struct file *file)
350 hrtimer_cancel(&lg->cpus[i].hrt); 467 hrtimer_cancel(&lg->cpus[i].hrt);
351 /* We can free up the register page we allocated. */ 468 /* We can free up the register page we allocated. */
352 free_page(lg->cpus[i].regs_page); 469 free_page(lg->cpus[i].regs_page);
353 /* Now all the memory cleanups are done, it's safe to release 470 /*
354 * the Launcher's memory management structure. */ 471 * Now all the memory cleanups are done, it's safe to release
472 * the Launcher's memory management structure.
473 */
355 mmput(lg->cpus[i].mm); 474 mmput(lg->cpus[i].mm);
356 } 475 }
357 476
358 /* Release any eventfds they registered. */ 477 /* Release any eventfds they registered. */
359 for (i = 0; i < lg->eventfds->num; i++) 478 for (i = 0; i < lg->eventfds->num; i++)
360 fput(lg->eventfds->map[i].event); 479 eventfd_ctx_put(lg->eventfds->map[i].event);
361 kfree(lg->eventfds); 480 kfree(lg->eventfds);
362 481
363 /* If lg->dead doesn't contain an error code it will be NULL or a 482 /*
364 * kmalloc()ed string, either of which is ok to hand to kfree(). */ 483 * If lg->dead doesn't contain an error code it will be NULL or a
484 * kmalloc()ed string, either of which is ok to hand to kfree().
485 */
365 if (!IS_ERR(lg->dead)) 486 if (!IS_ERR(lg->dead))
366 kfree(lg->dead); 487 kfree(lg->dead);
367 /* Free the memory allocated to the lguest_struct */ 488 /* Free the memory allocated to the lguest_struct */
@@ -385,7 +506,8 @@ static int close(struct inode *inode, struct file *file)
385 * 506 *
386 * We begin our understanding with the Host kernel interface which the Launcher 507 * We begin our understanding with the Host kernel interface which the Launcher
387 * uses: reading and writing a character device called /dev/lguest. All the 508 * uses: reading and writing a character device called /dev/lguest. All the
388 * work happens in the read(), write() and close() routines: */ 509 * work happens in the read(), write() and close() routines:
510 */
389static struct file_operations lguest_fops = { 511static struct file_operations lguest_fops = {
390 .owner = THIS_MODULE, 512 .owner = THIS_MODULE,
391 .release = close, 513 .release = close,
@@ -393,8 +515,10 @@ static struct file_operations lguest_fops = {
393 .read = read, 515 .read = read,
394}; 516};
395 517
396/* This is a textbook example of a "misc" character device. Populate a "struct 518/*
397 * miscdevice" and register it with misc_register(). */ 519 * This is a textbook example of a "misc" character device. Populate a "struct
520 * miscdevice" and register it with misc_register().
521 */
398static struct miscdevice lguest_dev = { 522static struct miscdevice lguest_dev = {
399 .minor = MISC_DYNAMIC_MINOR, 523 .minor = MISC_DYNAMIC_MINOR,
400 .name = "lguest", 524 .name = "lguest",
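
To make the Launcher side of the /dev/lguest interface concrete, here is a minimal userspace sketch of the protocol the comments above describe: one LHREQ_INITIALIZE write carrying three pointer-sized values, then further writes for other requests such as interrupt injection. The LHREQ_* names come from <linux/lguest_launcher.h>; everything else (GUEST_BASE, GUEST_ARG1, GUEST_ARG2 and their values) is a made-up placeholder, since only the "base" argument is documented in the hunks shown here. This is an illustrative sketch, not part of the patch.

    /* Illustrative Launcher sketch -- not part of this patch. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/lguest_launcher.h>   /* LHREQ_INITIALIZE, LHREQ_IRQ, ... */

    /* Placeholder values: a real Launcher derives these from the mapped
     * Guest memory and the loaded kernel image. */
    #define GUEST_BASE   0x8000000UL   /* "base": start of Guest-physical memory */
    #define GUEST_ARG1   0UL           /* second initialize value (not shown in this hunk) */
    #define GUEST_ARG2   0UL           /* third initialize value (not shown in this hunk) */

    int main(void)
    {
            int fd = open("/dev/lguest", O_RDWR);
            if (fd < 0) {
                    perror("open /dev/lguest");
                    return 1;
            }

            /* The first write must be LHREQ_INITIALIZE plus three values. */
            unsigned long init[] = { LHREQ_INITIALIZE, GUEST_BASE, GUEST_ARG1, GUEST_ARG2 };
            if (write(fd, init, sizeof(init)) < 0)
                    perror("LHREQ_INITIALIZE");

            /* Later writes use other request values, eg. injecting interrupt 1. */
            unsigned long irq[] = { LHREQ_IRQ, 1 };
            if (write(fd, irq, sizeof(irq)) < 0)
                    perror("LHREQ_IRQ");

            /* Running the Guest happens via read(); omitted here. */
            close(fd);
            return 0;
    }
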
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a6fe1abda240..a8d0aee3bc0e 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1,9 +1,11 @@
1/*P:700 The pagetable code, on the other hand, still shows the scars of 1/*P:700
2 * The pagetable code, on the other hand, still shows the scars of
2 * previous encounters. It's functional, and as neat as it can be in the 3 * previous encounters. It's functional, and as neat as it can be in the
3 * circumstances, but be wary, for these things are subtle and break easily. 4 * circumstances, but be wary, for these things are subtle and break easily.
4 * The Guest provides a virtual to physical mapping, but we can neither trust 5 * The Guest provides a virtual to physical mapping, but we can neither trust
5 * it nor use it: we verify and convert it here then point the CPU to the 6 * it nor use it: we verify and convert it here then point the CPU to the
6 * converted Guest pages when running the Guest. :*/ 7 * converted Guest pages when running the Guest.
8:*/
7 9
8/* Copyright (C) Rusty Russell IBM Corporation 2006. 10/* Copyright (C) Rusty Russell IBM Corporation 2006.
9 * GPL v2 and any later version */ 11 * GPL v2 and any later version */
@@ -17,18 +19,20 @@
17#include <asm/bootparam.h> 19#include <asm/bootparam.h>
18#include "lg.h" 20#include "lg.h"
19 21
20/*M:008 We hold references to pages, which prevents them from being swapped. 22/*M:008
23 * We hold references to pages, which prevents them from being swapped.
21 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants 24 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
22 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we 25 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
23 * could probably consider launching Guests as non-root. :*/ 26 * could probably consider launching Guests as non-root.
27:*/
24 28
25/*H:300 29/*H:300
26 * The Page Table Code 30 * The Page Table Code
27 * 31 *
28 * We use two-level page tables for the Guest. If you're not entirely 32 * We use two-level page tables for the Guest, or three-level with PAE. If
29 * comfortable with virtual addresses, physical addresses and page tables then 33 * you're not entirely comfortable with virtual addresses, physical addresses
30 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with 34 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
31 * diagrams!). 35 * Table Handling" (with diagrams!).
32 * 36 *
33 * The Guest keeps page tables, but we maintain the actual ones here: these are 37 * The Guest keeps page tables, but we maintain the actual ones here: these are
34 * called "shadow" page tables. Which is a very Guest-centric name: these are 38 * called "shadow" page tables. Which is a very Guest-centric name: these are
@@ -45,16 +49,18 @@
45 * (v) Flushing (throwing away) page tables, 49 * (v) Flushing (throwing away) page tables,
46 * (vi) Mapping the Switcher when the Guest is about to run, 50 * (vi) Mapping the Switcher when the Guest is about to run,
47 * (vii) Setting up the page tables initially. 51 * (vii) Setting up the page tables initially.
48 :*/ 52:*/
49 53
50 54/*
51/* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is 55 * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB)
52 * conveniently placed at the top 4MB, so it uses a separate, complete PTE 56 * or 512 PTE entries with PAE (2MB).
53 * page. */ 57 */
54#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) 58#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
55 59
56/* For PAE we need the PMD index as well. We use the last 2MB, so we 60/*
57 * will need the last pmd entry of the last pmd page. */ 61 * For PAE we need the PMD index as well. We use the last 2MB, so we
62 * will need the last pmd entry of the last pmd page.
63 */
58#ifdef CONFIG_X86_PAE 64#ifdef CONFIG_X86_PAE
59#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) 65#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
60#define RESERVE_MEM 2U 66#define RESERVE_MEM 2U
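
The index arithmetic behind the two-level layout, and the SWITCHER_PGD_INDEX define above, is easy to see in isolation. A small standalone sketch, using the classic non-PAE x86 constants (1024 entries per level, 4K pages) instead of the kernel's macros; this is a model of the address split, not kernel code.

    /* Standalone model of 32-bit, non-PAE x86 paging indices (illustrative). */
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PTE_ENTRIES  1024            /* PTEs per page-table page       */
    #define PGD_ENTRIES  1024            /* PGD entries per top-level page */

    int main(void)
    {
            unsigned long vaddr = 0xFFC01234UL;     /* somewhere in the top 4MB */

            unsigned long pgd_index = vaddr >> (PAGE_SHIFT + 10);          /* bits 31..22 */
            unsigned long pte_index = (vaddr >> PAGE_SHIFT) & (PTE_ENTRIES - 1);
            unsigned long offset    = vaddr & ((1UL << PAGE_SHIFT) - 1);

            /* Each PGD entry spans 1024 * 4K = 4MB, so the last entry
             * (index 1023) covers 0xFFC00000..0xFFFFFFFF: that is why
             * SWITCHER_PGD_INDEX is PTRS_PER_PGD - 1. */
            printf("pgd=%lu pte=%lu offset=0x%lx\n", pgd_index, pte_index, offset);
            return 0;
    }
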
@@ -64,14 +70,18 @@
64#define CHECK_GPGD_MASK _PAGE_TABLE 70#define CHECK_GPGD_MASK _PAGE_TABLE
65#endif 71#endif
66 72
67/* We actually need a separate PTE page for each CPU. Remember that after the 73/*
74 * We actually need a separate PTE page for each CPU. Remember that after the
68 * Switcher code itself comes two pages for each CPU, and we don't want this 75 * Switcher code itself comes two pages for each CPU, and we don't want this
69 * CPU's guest to see the pages of any other CPU. */ 76 * CPU's guest to see the pages of any other CPU.
77 */
70static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); 78static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
71#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) 79#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
72 80
73/*H:320 The page table code is curly enough to need helper functions to keep it 81/*H:320
74 * clear and clean. 82 * The page table code is curly enough to need helper functions to keep it
83 * clear and clean. The kernel itself provides many of them; one advantage
84 * of insisting that the Guest and Host use the same CONFIG_PAE setting.
75 * 85 *
76 * There are two functions which return pointers to the shadow (aka "real") 86 * There are two functions which return pointers to the shadow (aka "real")
77 * page tables. 87 * page tables.
@@ -79,7 +89,8 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
79 * spgd_addr() takes the virtual address and returns a pointer to the top-level 89 * spgd_addr() takes the virtual address and returns a pointer to the top-level
80 * page directory entry (PGD) for that address. Since we keep track of several 90 * page directory entry (PGD) for that address. Since we keep track of several
81 * page tables, the "i" argument tells us which one we're interested in (it's 91 * page tables, the "i" argument tells us which one we're interested in (it's
82 * usually the current one). */ 92 * usually the current one).
93 */
83static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) 94static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
84{ 95{
85 unsigned int index = pgd_index(vaddr); 96 unsigned int index = pgd_index(vaddr);
@@ -96,9 +107,11 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
96} 107}
97 108
98#ifdef CONFIG_X86_PAE 109#ifdef CONFIG_X86_PAE
99/* This routine then takes the PGD entry given above, which contains the 110/*
111 * This routine then takes the PGD entry given above, which contains the
100 * address of the PMD page. It then returns a pointer to the PMD entry for the 112 * address of the PMD page. It then returns a pointer to the PMD entry for the
101 * given address. */ 113 * given address.
114 */
102static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) 115static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
103{ 116{
104 unsigned int index = pmd_index(vaddr); 117 unsigned int index = pmd_index(vaddr);
@@ -119,9 +132,11 @@ static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
119} 132}
120#endif 133#endif
121 134
122/* This routine then takes the page directory entry returned above, which 135/*
136 * This routine then takes the page directory entry returned above, which
123 * contains the address of the page table entry (PTE) page. It then returns a 137 * contains the address of the page table entry (PTE) page. It then returns a
124 * pointer to the PTE entry for the given address. */ 138 * pointer to the PTE entry for the given address.
139 */
125static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) 140static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
126{ 141{
127#ifdef CONFIG_X86_PAE 142#ifdef CONFIG_X86_PAE
@@ -139,8 +154,10 @@ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
139 return &page[pte_index(vaddr)]; 154 return &page[pte_index(vaddr)];
140} 155}
141 156
142/* These two functions just like the above two, except they access the Guest 157/*
143 * page tables. Hence they return a Guest address. */ 158 * These functions are just like the above two, except they access the Guest
159 * page tables. Hence they return a Guest address.
160 */
144static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) 161static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
145{ 162{
146 unsigned int index = vaddr >> (PGDIR_SHIFT); 163 unsigned int index = vaddr >> (PGDIR_SHIFT);
@@ -148,6 +165,7 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
148} 165}
149 166
150#ifdef CONFIG_X86_PAE 167#ifdef CONFIG_X86_PAE
168/* Follow the PGD to the PMD. */
151static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) 169static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
152{ 170{
153 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; 171 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
@@ -155,6 +173,7 @@ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
155 return gpage + pmd_index(vaddr) * sizeof(pmd_t); 173 return gpage + pmd_index(vaddr) * sizeof(pmd_t);
156} 174}
157 175
176/* Follow the PMD to the PTE. */
158static unsigned long gpte_addr(struct lg_cpu *cpu, 177static unsigned long gpte_addr(struct lg_cpu *cpu,
159 pmd_t gpmd, unsigned long vaddr) 178 pmd_t gpmd, unsigned long vaddr)
160{ 179{
@@ -164,6 +183,7 @@ static unsigned long gpte_addr(struct lg_cpu *cpu,
164 return gpage + pte_index(vaddr) * sizeof(pte_t); 183 return gpage + pte_index(vaddr) * sizeof(pte_t);
165} 184}
166#else 185#else
186/* Follow the PGD to the PTE (no mid-level for !PAE). */
167static unsigned long gpte_addr(struct lg_cpu *cpu, 187static unsigned long gpte_addr(struct lg_cpu *cpu,
168 pgd_t gpgd, unsigned long vaddr) 188 pgd_t gpgd, unsigned long vaddr)
169{ 189{
@@ -175,17 +195,21 @@ static unsigned long gpte_addr(struct lg_cpu *cpu,
175#endif 195#endif
176/*:*/ 196/*:*/
177 197
178/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as 198/*M:014
179 * an optimization (ie. pre-faulting). :*/ 199 * get_pfn is slow: we could probably try to grab batches of pages here as
200 * an optimization (ie. pre-faulting).
201:*/
180 202
181/*H:350 This routine takes a page number given by the Guest and converts it to 203/*H:350
204 * This routine takes a page number given by the Guest and converts it to
182 * an actual, physical page number. It can fail for several reasons: the 205 * an actual, physical page number. It can fail for several reasons: the
183 * virtual address might not be mapped by the Launcher, the write flag is set 206 * virtual address might not be mapped by the Launcher, the write flag is set
184 * and the page is read-only, or the write flag was set and the page was 207 * and the page is read-only, or the write flag was set and the page was
185 * shared so had to be copied, but we ran out of memory. 208 * shared so had to be copied, but we ran out of memory.
186 * 209 *
187 * This holds a reference to the page, so release_pte() is careful to put that 210 * This holds a reference to the page, so release_pte() is careful to put that
188 * back. */ 211 * back.
212 */
189static unsigned long get_pfn(unsigned long virtpfn, int write) 213static unsigned long get_pfn(unsigned long virtpfn, int write)
190{ 214{
191 struct page *page; 215 struct page *page;
@@ -198,33 +222,41 @@ static unsigned long get_pfn(unsigned long virtpfn, int write)
198 return -1UL; 222 return -1UL;
199} 223}
200 224
201/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table 225/*H:340
226 * Converting a Guest page table entry to a shadow (ie. real) page table
202 * entry can be a little tricky. The flags are (almost) the same, but the 227 * entry can be a little tricky. The flags are (almost) the same, but the
203 * Guest PTE contains a virtual page number: the CPU needs the real page 228 * Guest PTE contains a virtual page number: the CPU needs the real page
204 * number. */ 229 * number.
230 */
205static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) 231static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
206{ 232{
207 unsigned long pfn, base, flags; 233 unsigned long pfn, base, flags;
208 234
209 /* The Guest sets the global flag, because it thinks that it is using 235 /*
236 * The Guest sets the global flag, because it thinks that it is using
210 * PGE. We only told it to use PGE so it would tell us whether it was 237 * PGE. We only told it to use PGE so it would tell us whether it was
211 * flushing a kernel mapping or a userspace mapping. We don't actually 238 * flushing a kernel mapping or a userspace mapping. We don't actually
212 * use the global bit, so throw it away. */ 239 * use the global bit, so throw it away.
240 */
213 flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); 241 flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
214 242
215 /* The Guest's pages are offset inside the Launcher. */ 243 /* The Guest's pages are offset inside the Launcher. */
216 base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; 244 base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
217 245
218 /* We need a temporary "unsigned long" variable to hold the answer from 246 /*
247 * We need a temporary "unsigned long" variable to hold the answer from
219 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't 248 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
220 * fit in spte.pfn. get_pfn() finds the real physical number of the 249 * fit in spte.pfn. get_pfn() finds the real physical number of the
221 * page, given the virtual number. */ 250 * page, given the virtual number.
251 */
222 pfn = get_pfn(base + pte_pfn(gpte), write); 252 pfn = get_pfn(base + pte_pfn(gpte), write);
223 if (pfn == -1UL) { 253 if (pfn == -1UL) {
224 kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); 254 kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
225 /* When we destroy the Guest, we'll go through the shadow page 255 /*
256 * When we destroy the Guest, we'll go through the shadow page
226 * tables and release_pte() them. Make sure we don't think 257 * tables and release_pte() them. Make sure we don't think
227 * this one is valid! */ 258 * this one is valid!
259 */
228 flags = 0; 260 flags = 0;
229 } 261 }
230 /* Now we assemble our shadow PTE from the page number and flags. */ 262 /* Now we assemble our shadow PTE from the page number and flags. */
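
The gpte_to_spte() conversion above boils down to "swap the Guest's page number for the Host's, keep most of the flags". A schematic, self-contained model of that bit manipulation follows; the flag values and the host_pfn_for() lookup are placeholders standing in for the real _PAGE_* constants and get_pfn(), not the kernel's definitions.

    /* Schematic model of Guest-PTE -> shadow-PTE conversion (illustrative). */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define F_PRESENT    0x001
    #define F_RW         0x002
    #define F_GLOBAL     0x100       /* placeholder bit for _PAGE_GLOBAL      */
    #define FLAG_MASK    0xFFFULL    /* low 12 bits hold the flags, in this model */

    /* Pretend lookup: Guest page number -> Host page number. */
    static uint64_t host_pfn_for(uint64_t guest_pfn)
    {
            return guest_pfn + 0x10000;     /* arbitrary offset for the demo */
    }

    static uint64_t gpte_to_spte(uint64_t gpte)
    {
            /* Keep the flags, but drop the global bit, as the comment says. */
            uint64_t flags = (gpte & FLAG_MASK) & ~(uint64_t)F_GLOBAL;
            uint64_t gpfn  = gpte >> PAGE_SHIFT;

            /* Reassemble: real page number plus the surviving flags. */
            return (host_pfn_for(gpfn) << PAGE_SHIFT) | flags;
    }

    int main(void)
    {
            uint64_t gpte = (0x42ULL << PAGE_SHIFT) | F_PRESENT | F_RW | F_GLOBAL;
            printf("gpte=%#llx -> spte=%#llx\n",
                   (unsigned long long)gpte, (unsigned long long)gpte_to_spte(gpte));
            return 0;
    }
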
@@ -234,8 +266,10 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
234/*H:460 And to complete the chain, release_pte() looks like this: */ 266/*H:460 And to complete the chain, release_pte() looks like this: */
235static void release_pte(pte_t pte) 267static void release_pte(pte_t pte)
236{ 268{
237 /* Remember that get_user_pages_fast() took a reference to the page, in 269 /*
238 * get_pfn()? We have to put it back now. */ 270 * Remember that get_user_pages_fast() took a reference to the page, in
271 * get_pfn()? We have to put it back now.
272 */
239 if (pte_flags(pte) & _PAGE_PRESENT) 273 if (pte_flags(pte) & _PAGE_PRESENT)
240 put_page(pte_page(pte)); 274 put_page(pte_page(pte));
241} 275}
@@ -273,7 +307,8 @@ static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
273 * and return to the Guest without it knowing. 307 * and return to the Guest without it knowing.
274 * 308 *
275 * If we fixed up the fault (ie. we mapped the address), this routine returns 309 * If we fixed up the fault (ie. we mapped the address), this routine returns
276 * true. Otherwise, it was a real fault and we need to tell the Guest. */ 310 * true. Otherwise, it was a real fault and we need to tell the Guest.
311 */
277bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) 312bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
278{ 313{
279 pgd_t gpgd; 314 pgd_t gpgd;
@@ -282,6 +317,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
282 pte_t gpte; 317 pte_t gpte;
283 pte_t *spte; 318 pte_t *spte;
284 319
320 /* Mid level for PAE. */
285#ifdef CONFIG_X86_PAE 321#ifdef CONFIG_X86_PAE
286 pmd_t *spmd; 322 pmd_t *spmd;
287 pmd_t gpmd; 323 pmd_t gpmd;
@@ -298,22 +334,26 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
298 if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { 334 if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
299 /* No shadow entry: allocate a new shadow PTE page. */ 335 /* No shadow entry: allocate a new shadow PTE page. */
300 unsigned long ptepage = get_zeroed_page(GFP_KERNEL); 336 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
301 /* This is not really the Guest's fault, but killing it is 337 /*
302 * simple for this corner case. */ 338 * This is not really the Guest's fault, but killing it is
339 * simple for this corner case.
340 */
303 if (!ptepage) { 341 if (!ptepage) {
304 kill_guest(cpu, "out of memory allocating pte page"); 342 kill_guest(cpu, "out of memory allocating pte page");
305 return false; 343 return false;
306 } 344 }
307 /* We check that the Guest pgd is OK. */ 345 /* We check that the Guest pgd is OK. */
308 check_gpgd(cpu, gpgd); 346 check_gpgd(cpu, gpgd);
309 /* And we copy the flags to the shadow PGD entry. The page 347 /*
310 * number in the shadow PGD is the page we just allocated. */ 348 * And we copy the flags to the shadow PGD entry. The page
349 * number in the shadow PGD is the page we just allocated.
350 */
311 set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); 351 set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
312 } 352 }
313 353
314#ifdef CONFIG_X86_PAE 354#ifdef CONFIG_X86_PAE
315 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); 355 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
316 /* middle level not present? We can't map it in. */ 356 /* Middle level not present? We can't map it in. */
317 if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) 357 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
318 return false; 358 return false;
319 359
@@ -324,8 +364,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
324 /* No shadow entry: allocate a new shadow PTE page. */ 364 /* No shadow entry: allocate a new shadow PTE page. */
325 unsigned long ptepage = get_zeroed_page(GFP_KERNEL); 365 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
326 366
327 /* This is not really the Guest's fault, but killing it is 367 /*
328 * simple for this corner case. */ 368 * This is not really the Guest's fault, but killing it is
369 * simple for this corner case.
370 */
329 if (!ptepage) { 371 if (!ptepage) {
330 kill_guest(cpu, "out of memory allocating pte page"); 372 kill_guest(cpu, "out of memory allocating pte page");
331 return false; 373 return false;
@@ -334,27 +376,37 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
334 /* We check that the Guest pmd is OK. */ 376 /* We check that the Guest pmd is OK. */
335 check_gpmd(cpu, gpmd); 377 check_gpmd(cpu, gpmd);
336 378
337 /* And we copy the flags to the shadow PMD entry. The page 379 /*
338 * number in the shadow PMD is the page we just allocated. */ 380 * And we copy the flags to the shadow PMD entry. The page
381 * number in the shadow PMD is the page we just allocated.
382 */
339 native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); 383 native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
340 } 384 }
341 385
342 /* OK, now we look at the lower level in the Guest page table: keep its 386 /*
343 * address, because we might update it later. */ 387 * OK, now we look at the lower level in the Guest page table: keep its
388 * address, because we might update it later.
389 */
344 gpte_ptr = gpte_addr(cpu, gpmd, vaddr); 390 gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
345#else 391#else
346 /* OK, now we look at the lower level in the Guest page table: keep its 392 /*
347 * address, because we might update it later. */ 393 * OK, now we look at the lower level in the Guest page table: keep its
394 * address, because we might update it later.
395 */
348 gpte_ptr = gpte_addr(cpu, gpgd, vaddr); 396 gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
349#endif 397#endif
398
399 /* Read the actual PTE value. */
350 gpte = lgread(cpu, gpte_ptr, pte_t); 400 gpte = lgread(cpu, gpte_ptr, pte_t);
351 401
352 /* If this page isn't in the Guest page tables, we can't page it in. */ 402 /* If this page isn't in the Guest page tables, we can't page it in. */
353 if (!(pte_flags(gpte) & _PAGE_PRESENT)) 403 if (!(pte_flags(gpte) & _PAGE_PRESENT))
354 return false; 404 return false;
355 405
356 /* Check they're not trying to write to a page the Guest wants 406 /*
357 * read-only (bit 2 of errcode == write). */ 407 * Check they're not trying to write to a page the Guest wants
408 * read-only (bit 2 of errcode == write).
409 */
358 if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) 410 if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
359 return false; 411 return false;
360 412
@@ -362,8 +414,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
362 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) 414 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
363 return false; 415 return false;
364 416
365 /* Check that the Guest PTE flags are OK, and the page number is below 417 /*
366 * the pfn_limit (ie. not mapping the Launcher binary). */ 418 * Check that the Guest PTE flags are OK, and the page number is below
419 * the pfn_limit (ie. not mapping the Launcher binary).
420 */
367 check_gpte(cpu, gpte); 421 check_gpte(cpu, gpte);
368 422
369 /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ 423 /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
@@ -373,29 +427,40 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
373 427
374 /* Get the pointer to the shadow PTE entry we're going to set. */ 428 /* Get the pointer to the shadow PTE entry we're going to set. */
375 spte = spte_addr(cpu, *spgd, vaddr); 429 spte = spte_addr(cpu, *spgd, vaddr);
376 /* If there was a valid shadow PTE entry here before, we release it. 430
377 * This can happen with a write to a previously read-only entry. */ 431 /*
432 * If there was a valid shadow PTE entry here before, we release it.
433 * This can happen with a write to a previously read-only entry.
434 */
378 release_pte(*spte); 435 release_pte(*spte);
379 436
380 /* If this is a write, we insist that the Guest page is writable (the 437 /*
381 * final arg to gpte_to_spte()). */ 438 * If this is a write, we insist that the Guest page is writable (the
439 * final arg to gpte_to_spte()).
440 */
382 if (pte_dirty(gpte)) 441 if (pte_dirty(gpte))
383 *spte = gpte_to_spte(cpu, gpte, 1); 442 *spte = gpte_to_spte(cpu, gpte, 1);
384 else 443 else
385 /* If this is a read, don't set the "writable" bit in the page 444 /*
445 * If this is a read, don't set the "writable" bit in the page
386 * table entry, even if the Guest says it's writable. That way 446 * table entry, even if the Guest says it's writable. That way
387 * we will come back here when a write does actually occur, so 447 * we will come back here when a write does actually occur, so
388 * we can update the Guest's _PAGE_DIRTY flag. */ 448 * we can update the Guest's _PAGE_DIRTY flag.
449 */
389 native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); 450 native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
390 451
391 /* Finally, we write the Guest PTE entry back: we've set the 452 /*
392 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ 453 * Finally, we write the Guest PTE entry back: we've set the
454 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
455 */
393 lgwrite(cpu, gpte_ptr, pte_t, gpte); 456 lgwrite(cpu, gpte_ptr, pte_t, gpte);
394 457
395 /* The fault is fixed, the page table is populated, the mapping 458 /*
459 * The fault is fixed, the page table is populated, the mapping
396 * manipulated, the result returned and the code complete. A small 460 * manipulated, the result returned and the code complete. A small
397 * delay and a trace of alliteration are the only indications the Guest 461 * delay and a trace of alliteration are the only indications the Guest
398 * has that a page fault occurred at all. */ 462 * has that a page fault occurred at all.
463 */
399 return true; 464 return true;
400} 465}
401 466
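
The access checks in demand_page() above follow the x86 page-fault error code: the bit with value 2 marks a write and the bit with value 4 marks a userspace access, exactly as the (errcode & 2) and (errcode & 4) tests show. A condensed, standalone restatement of just that decision logic; the flag values are placeholders, not the kernel's _PAGE_* constants.

    /* Standalone restatement of demand_page()'s permission checks (illustrative). */
    #include <stdbool.h>
    #include <stdio.h>

    #define F_PRESENT  0x1
    #define F_RW       0x2
    #define F_USER     0x4

    /* errcode & 2 => write access, errcode & 4 => userspace access. */
    static bool guest_pte_allows(unsigned long pte_flags, int errcode)
    {
            if (!(pte_flags & F_PRESENT))
                    return false;           /* not mapped by the Guest at all */
            if ((errcode & 2) && !(pte_flags & F_RW))
                    return false;           /* write to a read-only page      */
            if ((errcode & 4) && !(pte_flags & F_USER))
                    return false;           /* user access to a kernel page   */
            return true;                    /* OK: the Host can map it in     */
    }

    int main(void)
    {
            printf("%d\n", guest_pte_allows(F_PRESENT | F_USER, 2));         /* 0: RO write   */
            printf("%d\n", guest_pte_allows(F_PRESENT | F_RW | F_USER, 2));  /* 1: allowed    */
            return 0;
    }
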
@@ -408,7 +473,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
408 * mapped, so it's overkill. 473 * mapped, so it's overkill.
409 * 474 *
410 * This is a quick version which answers the question: is this virtual address 475 * This is a quick version which answers the question: is this virtual address
411 * mapped by the shadow page tables, and is it writable? */ 476 * mapped by the shadow page tables, and is it writable?
477 */
412static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) 478static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
413{ 479{
414 pgd_t *spgd; 480 pgd_t *spgd;
@@ -428,21 +494,26 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
428 return false; 494 return false;
429#endif 495#endif
430 496
431 /* Check the flags on the pte entry itself: it must be present and 497 /*
432 * writable. */ 498 * Check the flags on the pte entry itself: it must be present and
499 * writable.
500 */
433 flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); 501 flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
434 502
435 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); 503 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
436} 504}
437 505
438/* So, when pin_stack_pages() asks us to pin a page, we check if it's already 506/*
507 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
439 * in the page tables, and if not, we call demand_page() with error code 2 508 * in the page tables, and if not, we call demand_page() with error code 2
440 * (meaning "write"). */ 509 * (meaning "write").
510 */
441void pin_page(struct lg_cpu *cpu, unsigned long vaddr) 511void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
442{ 512{
443 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) 513 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
444 kill_guest(cpu, "bad stack page %#lx", vaddr); 514 kill_guest(cpu, "bad stack page %#lx", vaddr);
445} 515}
516/*:*/
446 517
447#ifdef CONFIG_X86_PAE 518#ifdef CONFIG_X86_PAE
448static void release_pmd(pmd_t *spmd) 519static void release_pmd(pmd_t *spmd)
@@ -479,15 +550,21 @@ static void release_pgd(pgd_t *spgd)
479} 550}
480 551
481#else /* !CONFIG_X86_PAE */ 552#else /* !CONFIG_X86_PAE */
482/*H:450 If we chase down the release_pgd() code, it looks like this: */ 553/*H:450
554 * If we chase down the release_pgd() code, the non-PAE version looks like
555 * this. The PAE version is almost identical, but instead of calling
556 * release_pte it calls release_pmd(), which looks much like this.
557 */
483static void release_pgd(pgd_t *spgd) 558static void release_pgd(pgd_t *spgd)
484{ 559{
485 /* If the entry's not present, there's nothing to release. */ 560 /* If the entry's not present, there's nothing to release. */
486 if (pgd_flags(*spgd) & _PAGE_PRESENT) { 561 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
487 unsigned int i; 562 unsigned int i;
488 /* Converting the pfn to find the actual PTE page is easy: turn 563 /*
564 * Converting the pfn to find the actual PTE page is easy: turn
489 * the page number into a physical address, then convert to a 565 * the page number into a physical address, then convert to a
490 * virtual address (easy for kernel pages like this one). */ 566 * virtual address (easy for kernel pages like this one).
567 */
491 pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); 568 pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
492 /* For each entry in the page, we might need to release it. */ 569 /* For each entry in the page, we might need to release it. */
493 for (i = 0; i < PTRS_PER_PTE; i++) 570 for (i = 0; i < PTRS_PER_PTE; i++)
@@ -499,9 +576,12 @@ static void release_pgd(pgd_t *spgd)
499 } 576 }
500} 577}
501#endif 578#endif
502/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() 579
580/*H:445
581 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
503 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. 582 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
504 * It simply releases every PTE page from 0 up to the Guest's kernel address. */ 583 * It simply releases every PTE page from 0 up to the Guest's kernel address.
584 */
505static void flush_user_mappings(struct lguest *lg, int idx) 585static void flush_user_mappings(struct lguest *lg, int idx)
506{ 586{
507 unsigned int i; 587 unsigned int i;
@@ -510,10 +590,12 @@ static void flush_user_mappings(struct lguest *lg, int idx)
510 release_pgd(lg->pgdirs[idx].pgdir + i); 590 release_pgd(lg->pgdirs[idx].pgdir + i);
511} 591}
512 592
513/*H:440 (v) Flushing (throwing away) page tables, 593/*H:440
594 * (v) Flushing (throwing away) page tables,
514 * 595 *
515 * The Guest has a hypercall to throw away the page tables: it's used when a 596 * The Guest has a hypercall to throw away the page tables: it's used when a
516 * large number of mappings have been changed. */ 597 * large number of mappings have been changed.
598 */
517void guest_pagetable_flush_user(struct lg_cpu *cpu) 599void guest_pagetable_flush_user(struct lg_cpu *cpu)
518{ 600{
519 /* Drop the userspace part of the current page table. */ 601 /* Drop the userspace part of the current page table. */
@@ -551,9 +633,11 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
551 return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); 633 return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
552} 634}
553 635
554/* We keep several page tables. This is a simple routine to find the page 636/*
637 * We keep several page tables. This is a simple routine to find the page
555 * table (if any) corresponding to this top-level address the Guest has given 638 * table (if any) corresponding to this top-level address the Guest has given
556 * us. */ 639 * us.
640 */
557static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) 641static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
558{ 642{
559 unsigned int i; 643 unsigned int i;
@@ -563,9 +647,11 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
563 return i; 647 return i;
564} 648}
565 649
566/*H:435 And this is us, creating the new page directory. If we really do 650/*H:435
651 * And this is us, creating the new page directory. If we really do
567 * allocate a new one (and so the kernel parts are not there), we set 652 * allocate a new one (and so the kernel parts are not there), we set
568 * blank_pgdir. */ 653 * blank_pgdir.
654 */
569static unsigned int new_pgdir(struct lg_cpu *cpu, 655static unsigned int new_pgdir(struct lg_cpu *cpu,
570 unsigned long gpgdir, 656 unsigned long gpgdir,
571 int *blank_pgdir) 657 int *blank_pgdir)
@@ -575,8 +661,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
575 pmd_t *pmd_table; 661 pmd_t *pmd_table;
576#endif 662#endif
577 663
578 /* We pick one entry at random to throw out. Choosing the Least 664 /*
579 * Recently Used might be better, but this is easy. */ 665 * We pick one entry at random to throw out. Choosing the Least
666 * Recently Used might be better, but this is easy.
667 */
580 next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); 668 next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
581 /* If it's never been allocated at all before, try now. */ 669 /* If it's never been allocated at all before, try now. */
582 if (!cpu->lg->pgdirs[next].pgdir) { 670 if (!cpu->lg->pgdirs[next].pgdir) {
@@ -587,8 +675,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
587 next = cpu->cpu_pgd; 675 next = cpu->cpu_pgd;
588 else { 676 else {
589#ifdef CONFIG_X86_PAE 677#ifdef CONFIG_X86_PAE
590 /* In PAE mode, allocate a pmd page and populate the 678 /*
591 * last pgd entry. */ 679 * In PAE mode, allocate a pmd page and populate the
680 * last pgd entry.
681 */
592 pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); 682 pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
593 if (!pmd_table) { 683 if (!pmd_table) {
594 free_page((long)cpu->lg->pgdirs[next].pgdir); 684 free_page((long)cpu->lg->pgdirs[next].pgdir);
@@ -598,8 +688,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
598 set_pgd(cpu->lg->pgdirs[next].pgdir + 688 set_pgd(cpu->lg->pgdirs[next].pgdir +
599 SWITCHER_PGD_INDEX, 689 SWITCHER_PGD_INDEX,
600 __pgd(__pa(pmd_table) | _PAGE_PRESENT)); 690 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
601 /* This is a blank page, so there are no kernel 691 /*
602 * mappings: caller must map the stack! */ 692 * This is a blank page, so there are no kernel
693 * mappings: caller must map the stack!
694 */
603 *blank_pgdir = 1; 695 *blank_pgdir = 1;
604 } 696 }
605#else 697#else
@@ -615,19 +707,23 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
615 return next; 707 return next;
616} 708}
617 709
618/*H:430 (iv) Switching page tables 710/*H:430
711 * (iv) Switching page tables
619 * 712 *
620 * Now we've seen all the page table setting and manipulation, let's see 713 * Now we've seen all the page table setting and manipulation, let's see
621 * what happens when the Guest changes page tables (ie. changes the top-level 714 * what happens when the Guest changes page tables (ie. changes the top-level
622 * pgdir). This occurs on almost every context switch. */ 715 * pgdir). This occurs on almost every context switch.
716 */
623void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) 717void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
624{ 718{
625 int newpgdir, repin = 0; 719 int newpgdir, repin = 0;
626 720
627 /* Look to see if we have this one already. */ 721 /* Look to see if we have this one already. */
628 newpgdir = find_pgdir(cpu->lg, pgtable); 722 newpgdir = find_pgdir(cpu->lg, pgtable);
629 /* If not, we allocate or mug an existing one: if it's a fresh one, 723 /*
630 * repin gets set to 1. */ 724 * If not, we allocate or mug an existing one: if it's a fresh one,
725 * repin gets set to 1.
726 */
631 if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) 727 if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
632 newpgdir = new_pgdir(cpu, pgtable, &repin); 728 newpgdir = new_pgdir(cpu, pgtable, &repin);
633 /* Change the current pgd index to the new one. */ 729 /* Change the current pgd index to the new one. */
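
guest_new_pagetable() above is a find-or-allocate over a small fixed cache of shadow top levels, with random eviction when nothing matches. A toy version of that cache policy, detached from the kernel types; the cache size and addresses are invented for the demo.

    /* Toy model of the shadow-pgdir cache lookup and eviction (illustrative). */
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_PGDIRS 4

    static unsigned long cache[NR_PGDIRS];          /* Guest top-level addresses */

    /* Return the slot for this Guest pgdir, evicting a random one on a miss. */
    static unsigned int find_or_evict(unsigned long gpgdir, int *fresh)
    {
            unsigned int i;

            *fresh = 0;
            for (i = 0; i < NR_PGDIRS; i++)
                    if (cache[i] == gpgdir)
                            return i;               /* already tracked: reuse it   */

            /* Miss: throw out a random entry, like new_pgdir() does. */
            i = rand() % NR_PGDIRS;
            cache[i] = gpgdir;
            *fresh = 1;                             /* caller must repopulate (repin) */
            return i;
    }

    int main(void)
    {
            int fresh;
            unsigned int slot = find_or_evict(0x1234000UL, &fresh);
            printf("slot=%u fresh=%d\n", slot, fresh);
            slot = find_or_evict(0x1234000UL, &fresh);      /* now a hit */
            printf("slot=%u fresh=%d\n", slot, fresh);
            return 0;
    }
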
@@ -637,9 +733,11 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
637 pin_stack_pages(cpu); 733 pin_stack_pages(cpu);
638} 734}
639 735
640/*H:470 Finally, a routine which throws away everything: all PGD entries in all 736/*H:470
737 * Finally, a routine which throws away everything: all PGD entries in all
641 * the shadow page tables, including the Guest's kernel mappings. This is used 738 * the shadow page tables, including the Guest's kernel mappings. This is used
642 * when we destroy the Guest. */ 739 * when we destroy the Guest.
740 */
643static void release_all_pagetables(struct lguest *lg) 741static void release_all_pagetables(struct lguest *lg)
644{ 742{
645 unsigned int i, j; 743 unsigned int i, j;
@@ -656,8 +754,10 @@ static void release_all_pagetables(struct lguest *lg)
656 spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; 754 spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
657 pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); 755 pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
658 756
659 /* And release the pmd entries of that pmd page, 757 /*
660 * except for the switcher pmd. */ 758 * And release the pmd entries of that pmd page,
759 * except for the switcher pmd.
760 */
661 for (k = 0; k < SWITCHER_PMD_INDEX; k++) 761 for (k = 0; k < SWITCHER_PMD_INDEX; k++)
662 release_pmd(&pmdpage[k]); 762 release_pmd(&pmdpage[k]);
663#endif 763#endif
@@ -667,10 +767,12 @@ static void release_all_pagetables(struct lguest *lg)
667 } 767 }
668} 768}
669 769
670/* We also throw away everything when a Guest tells us it's changed a kernel 770/*
771 * We also throw away everything when a Guest tells us it's changed a kernel
671 * mapping. Since kernel mappings are in every page table, it's easiest to 772 * mapping. Since kernel mappings are in every page table, it's easiest to
672 * throw them all away. This traps the Guest in amber for a while as 773 * throw them all away. This traps the Guest in amber for a while as
673 * everything faults back in, but it's rare. */ 774 * everything faults back in, but it's rare.
775 */
674void guest_pagetable_clear_all(struct lg_cpu *cpu) 776void guest_pagetable_clear_all(struct lg_cpu *cpu)
675{ 777{
676 release_all_pagetables(cpu->lg); 778 release_all_pagetables(cpu->lg);
@@ -678,15 +780,19 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu)
678 pin_stack_pages(cpu); 780 pin_stack_pages(cpu);
679} 781}
680/*:*/ 782/*:*/
681/*M:009 Since we throw away all mappings when a kernel mapping changes, our 783
784/*M:009
785 * Since we throw away all mappings when a kernel mapping changes, our
682 * performance sucks for guests using highmem. In fact, a guest with 786 * performance sucks for guests using highmem. In fact, a guest with
683 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is 787 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
684 * usually slower than a Guest with less memory. 788 * usually slower than a Guest with less memory.
685 * 789 *
686 * This, of course, cannot be fixed. It would take some kind of... well, I 790 * This, of course, cannot be fixed. It would take some kind of... well, I
687 * don't know, but the term "puissant code-fu" comes to mind. :*/ 791 * don't know, but the term "puissant code-fu" comes to mind.
792:*/
688 793
689/*H:420 This is the routine which actually sets the page table entry for the 794/*H:420
795 * This is the routine which actually sets the page table entry for the
690 * "idx"'th shadow page table. 796 * "idx"'th shadow page table.
691 * 797 *
692 * Normally, we can just throw out the old entry and replace it with 0: if they 798 * Normally, we can just throw out the old entry and replace it with 0: if they
@@ -715,31 +821,36 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
715 spmd = spmd_addr(cpu, *spgd, vaddr); 821 spmd = spmd_addr(cpu, *spgd, vaddr);
716 if (pmd_flags(*spmd) & _PAGE_PRESENT) { 822 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
717#endif 823#endif
718 /* Otherwise, we start by releasing 824 /* Otherwise, start by releasing the existing entry. */
719 * the existing entry. */
720 pte_t *spte = spte_addr(cpu, *spgd, vaddr); 825 pte_t *spte = spte_addr(cpu, *spgd, vaddr);
721 release_pte(*spte); 826 release_pte(*spte);
722 827
723 /* If they're setting this entry as dirty or accessed, 828 /*
724 * we might as well put that entry they've given us 829 * If they're setting this entry as dirty or accessed,
725 * in now. This shaves 10% off a 830 * we might as well put that entry they've given us in
726 * copy-on-write micro-benchmark. */ 831 * now. This shaves 10% off a copy-on-write
832 * micro-benchmark.
833 */
727 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { 834 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
728 check_gpte(cpu, gpte); 835 check_gpte(cpu, gpte);
729 native_set_pte(spte, 836 native_set_pte(spte,
730 gpte_to_spte(cpu, gpte, 837 gpte_to_spte(cpu, gpte,
731 pte_flags(gpte) & _PAGE_DIRTY)); 838 pte_flags(gpte) & _PAGE_DIRTY));
732 } else 839 } else {
733 /* Otherwise kill it and we can demand_page() 840 /*
734 * it in later. */ 841 * Otherwise kill it and we can demand_page()
842 * it in later.
843 */
735 native_set_pte(spte, __pte(0)); 844 native_set_pte(spte, __pte(0));
845 }
736#ifdef CONFIG_X86_PAE 846#ifdef CONFIG_X86_PAE
737 } 847 }
738#endif 848#endif
739 } 849 }
740} 850}
741 851
742/*H:410 Updating a PTE entry is a little trickier. 852/*H:410
853 * Updating a PTE entry is a little trickier.
743 * 854 *
744 * We keep track of several different page tables (the Guest uses one for each 855 * We keep track of several different page tables (the Guest uses one for each
745 * process, so it makes sense to cache at least a few). Each of these have 856 * process, so it makes sense to cache at least a few). Each of these have
@@ -748,12 +859,15 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
748 * all the page tables, not just the current one. This is rare. 859 * all the page tables, not just the current one. This is rare.
749 * 860 *
750 * The benefit is that when we have to track a new page table, we can keep all 861 * The benefit is that when we have to track a new page table, we can keep all
751 * the kernel mappings. This speeds up context switch immensely. */ 862 * the kernel mappings. This speeds up context switch immensely.
863 */
752void guest_set_pte(struct lg_cpu *cpu, 864void guest_set_pte(struct lg_cpu *cpu,
753 unsigned long gpgdir, unsigned long vaddr, pte_t gpte) 865 unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
754{ 866{
755 /* Kernel mappings must be changed on all top levels. Slow, but doesn't 867 /*
756 * happen often. */ 868 * Kernel mappings must be changed on all top levels. Slow, but doesn't
869 * happen often.
870 */
757 if (vaddr >= cpu->lg->kernel_address) { 871 if (vaddr >= cpu->lg->kernel_address) {
758 unsigned int i; 872 unsigned int i;
759 for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) 873 for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
@@ -795,19 +909,25 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
795 /* ... throw it away. */ 909 /* ... throw it away. */
796 release_pgd(lg->pgdirs[pgdir].pgdir + idx); 910 release_pgd(lg->pgdirs[pgdir].pgdir + idx);
797} 911}
912
798#ifdef CONFIG_X86_PAE 913#ifdef CONFIG_X86_PAE
914/* For setting a mid-level, we just throw everything away. It's easy. */
799void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) 915void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
800{ 916{
801 guest_pagetable_clear_all(&lg->cpus[0]); 917 guest_pagetable_clear_all(&lg->cpus[0]);
802} 918}
803#endif 919#endif
804 920
805/* Once we know how much memory we have we can construct simple identity 921/*H:505
806 * (which set virtual == physical) and linear mappings 922 * To get through boot, we construct simple identity page mappings (which
807 * which will get the Guest far enough into the boot to create its own. 923 * set virtual == physical) and linear mappings which will get the Guest far
924 * enough into the boot to create its own. The linear mapping means we
925 * simplify the Guest boot, but it makes assumptions about their PAGE_OFFSET,
926 * as you'll see.
808 * 927 *
809 * We lay them out of the way, just below the initrd (which is why we need to 928 * We lay them out of the way, just below the initrd (which is why we need to
810 * know its size here). */ 929 * know its size here).
930 */
811static unsigned long setup_pagetables(struct lguest *lg, 931static unsigned long setup_pagetables(struct lguest *lg,
812 unsigned long mem, 932 unsigned long mem,
813 unsigned long initrd_size) 933 unsigned long initrd_size)
@@ -825,8 +945,10 @@ static unsigned long setup_pagetables(struct lguest *lg,
825 unsigned int phys_linear; 945 unsigned int phys_linear;
826#endif 946#endif
827 947
828 /* We have mapped_pages frames to map, so we need 948 /*
829 * linear_pages page tables to map them. */ 949 * We have mapped_pages frames to map, so we need linear_pages page
950 * tables to map them.
951 */
830 mapped_pages = mem / PAGE_SIZE; 952 mapped_pages = mem / PAGE_SIZE;
831 linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE; 953 linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
832 954
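
The mapped_pages / linear_pages computation above is a plain round-up: each PTE page maps PTRS_PER_PTE frames. A quick standalone check of that sizing arithmetic, using the non-PAE constants; this only models the calculation, it is not the kernel code.

    /* Quick check of the setup_pagetables() sizing arithmetic (illustrative). */
    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PTRS_PER_PTE  1024UL    /* non-PAE: 1024 PTEs per page-table page */

    int main(void)
    {
            unsigned long mem = 64UL * 1024 * 1024;         /* a 64MB Guest, say */

            unsigned long mapped_pages = mem / PAGE_SIZE;   /* 16384 frames */
            unsigned long linear_pages =
                    (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;   /* 16 PTE pages */

            /* One top-level page plus those 16 PTE pages is all the Guest
             * needs to get far enough into boot to build its own tables. */
            printf("mapped_pages=%lu linear_pages=%lu\n", mapped_pages, linear_pages);
            return 0;
    }
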
@@ -837,10 +959,16 @@ static unsigned long setup_pagetables(struct lguest *lg,
837 linear = (void *)pgdir - linear_pages * PAGE_SIZE; 959 linear = (void *)pgdir - linear_pages * PAGE_SIZE;
838 960
839#ifdef CONFIG_X86_PAE 961#ifdef CONFIG_X86_PAE
962 /*
963 * And the single mid page goes below that. We only use one, but
964 * that's enough to map 1G, which definitely gets us through boot.
965 */
840 pmds = (void *)linear - PAGE_SIZE; 966 pmds = (void *)linear - PAGE_SIZE;
841#endif 967#endif
842 /* Linear mapping is easy: put every page's address into the 968 /*
843 * mapping in order. */ 969 * Linear mapping is easy: put every page's address into the
970 * mapping in order.
971 */
844 for (i = 0; i < mapped_pages; i++) { 972 for (i = 0; i < mapped_pages; i++) {
845 pte_t pte; 973 pte_t pte;
846 pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER)); 974 pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
@@ -848,11 +976,14 @@ static unsigned long setup_pagetables(struct lguest *lg,
848 return -EFAULT; 976 return -EFAULT;
849 } 977 }
850 978
851 /* The top level points to the linear page table pages above.
852 * We setup the identity and linear mappings here. */
853#ifdef CONFIG_X86_PAE 979#ifdef CONFIG_X86_PAE
980 /*
981 * Make the Guest PMD entries point to the corresponding place in the
982 * linear mapping (up to one page worth of PMD).
983 */
854 for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD; 984 for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
855 i += PTRS_PER_PTE, j++) { 985 i += PTRS_PER_PTE, j++) {
986 /* FIXME: native_set_pmd is overkill here. */
856 native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) 987 native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
857 - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); 988 - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
858 989
@@ -860,18 +991,36 @@ static unsigned long setup_pagetables(struct lguest *lg,
860 return -EFAULT; 991 return -EFAULT;
861 } 992 }
862 993
994 /* One PGD entry, pointing to that PMD page. */
863 set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); 995 set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
996 /* Copy it in as the first PGD entry (ie. addresses 0-1G). */
864 if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) 997 if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
865 return -EFAULT; 998 return -EFAULT;
999 /*
1000 * And the third PGD entry (ie. addresses 3G-4G).
1001 *
1002 * FIXME: This assumes that PAGE_OFFSET for the Guest is 0xC0000000.
1003 */
866 if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) 1004 if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
867 return -EFAULT; 1005 return -EFAULT;
868#else 1006#else
1007 /*
1008 * The top level points to the linear page table pages above.
1009 * We setup the identity and linear mappings here.
1010 */
869 phys_linear = (unsigned long)linear - mem_base; 1011 phys_linear = (unsigned long)linear - mem_base;
870 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { 1012 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
871 pgd_t pgd; 1013 pgd_t pgd;
1014 /*
1015 * Create a PGD entry which points to the right part of the
1016 * linear PTE pages.
1017 */
872 pgd = __pgd((phys_linear + i * sizeof(pte_t)) | 1018 pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
873 (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); 1019 (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
874 1020
1021 /*
1022 * Copy it into the PGD page at 0 and PAGE_OFFSET.
1023 */
875 if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd)) 1024 if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
876 || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET) 1025 || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
877 + i / PTRS_PER_PTE], 1026 + i / PTRS_PER_PTE],
@@ -880,15 +1029,19 @@ static unsigned long setup_pagetables(struct lguest *lg,
880 } 1029 }
881#endif 1030#endif
882 1031
883 /* We return the top level (guest-physical) address: remember where 1032 /*
884 * this is. */ 1033 * We return the top level (guest-physical) address: we remember where
1034 * this is to write it into lguest_data when the Guest initializes.
1035 */
885 return (unsigned long)pgdir - mem_base; 1036 return (unsigned long)pgdir - mem_base;
886} 1037}
887 1038
888/*H:500 (vii) Setting up the page tables initially. 1039/*H:500
1040 * (vii) Setting up the page tables initially.
889 * 1041 *
890 * When a Guest is first created, the Launcher tells us where the toplevel of 1042 * When a Guest is first created, the Launcher tells us where the toplevel of
891 * its first page table is. We set some things up here: */ 1043 * its first page table is. We set some things up here:
1044 */
892int init_guest_pagetable(struct lguest *lg) 1045int init_guest_pagetable(struct lguest *lg)
893{ 1046{
894 u64 mem; 1047 u64 mem;
@@ -898,21 +1051,27 @@ int init_guest_pagetable(struct lguest *lg)
898 pgd_t *pgd; 1051 pgd_t *pgd;
899 pmd_t *pmd_table; 1052 pmd_t *pmd_table;
900#endif 1053#endif
901 /* Get the Guest memory size and the ramdisk size from the boot header 1054 /*
902 * located at lg->mem_base (Guest address 0). */ 1055 * Get the Guest memory size and the ramdisk size from the boot header
1056 * located at lg->mem_base (Guest address 0).
1057 */
903 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) 1058 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
904 || get_user(initrd_size, &boot->hdr.ramdisk_size)) 1059 || get_user(initrd_size, &boot->hdr.ramdisk_size))
905 return -EFAULT; 1060 return -EFAULT;
906 1061
907 /* We start on the first shadow page table, and give it a blank PGD 1062 /*
908 * page. */ 1063 * We start on the first shadow page table, and give it a blank PGD
1064 * page.
1065 */
909 lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size); 1066 lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
910 if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir)) 1067 if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
911 return lg->pgdirs[0].gpgdir; 1068 return lg->pgdirs[0].gpgdir;
912 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); 1069 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
913 if (!lg->pgdirs[0].pgdir) 1070 if (!lg->pgdirs[0].pgdir)
914 return -ENOMEM; 1071 return -ENOMEM;
1072
915#ifdef CONFIG_X86_PAE 1073#ifdef CONFIG_X86_PAE
1074 /* For PAE, we also create the initial mid-level. */
916 pgd = lg->pgdirs[0].pgdir; 1075 pgd = lg->pgdirs[0].pgdir;
917 pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); 1076 pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
918 if (!pmd_table) 1077 if (!pmd_table)
@@ -921,27 +1080,33 @@ int init_guest_pagetable(struct lguest *lg)
921 set_pgd(pgd + SWITCHER_PGD_INDEX, 1080 set_pgd(pgd + SWITCHER_PGD_INDEX,
922 __pgd(__pa(pmd_table) | _PAGE_PRESENT)); 1081 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
923#endif 1082#endif
1083
1084 /* This is the current page table. */
924 lg->cpus[0].cpu_pgd = 0; 1085 lg->cpus[0].cpu_pgd = 0;
925 return 0; 1086 return 0;
926} 1087}
927 1088
928/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */ 1089/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
929void page_table_guest_data_init(struct lg_cpu *cpu) 1090void page_table_guest_data_init(struct lg_cpu *cpu)
930{ 1091{
931 /* We get the kernel address: above this is all kernel memory. */ 1092 /* We get the kernel address: above this is all kernel memory. */
932 if (get_user(cpu->lg->kernel_address, 1093 if (get_user(cpu->lg->kernel_address,
933 &cpu->lg->lguest_data->kernel_address) 1094 &cpu->lg->lguest_data->kernel_address)
934 /* We tell the Guest that it can't use the top 2 or 4 MB 1095 /*
935 * of virtual addresses used by the Switcher. */ 1096 * We tell the Guest that it can't use the top 2 or 4 MB
1097 * of virtual addresses used by the Switcher.
1098 */
936 || put_user(RESERVE_MEM * 1024 * 1024, 1099 || put_user(RESERVE_MEM * 1024 * 1024,
937 &cpu->lg->lguest_data->reserve_mem) 1100 &cpu->lg->lguest_data->reserve_mem)
938 || put_user(cpu->lg->pgdirs[0].gpgdir, 1101 || put_user(cpu->lg->pgdirs[0].gpgdir,
939 &cpu->lg->lguest_data->pgdir)) 1102 &cpu->lg->lguest_data->pgdir))
940 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); 1103 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
941 1104
942 /* In flush_user_mappings() we loop from 0 to 1105 /*
1106 * In flush_user_mappings() we loop from 0 to
943 * "pgd_index(lg->kernel_address)". This assumes it won't hit the 1107 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
944 * Switcher mappings, so check that now. */ 1108 * Switcher mappings, so check that now.
1109 */
945#ifdef CONFIG_X86_PAE 1110#ifdef CONFIG_X86_PAE
946 if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && 1111 if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
947 pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) 1112 pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
@@ -964,12 +1129,14 @@ void free_guest_pagetable(struct lguest *lg)
964 free_page((long)lg->pgdirs[i].pgdir); 1129 free_page((long)lg->pgdirs[i].pgdir);
965} 1130}
966 1131
967/*H:480 (vi) Mapping the Switcher when the Guest is about to run. 1132/*H:480
1133 * (vi) Mapping the Switcher when the Guest is about to run.
968 * 1134 *
969 * The Switcher and the two pages for this CPU need to be visible in the 1135 * The Switcher and the two pages for this CPU need to be visible in the
970 * Guest (and not the pages for other CPUs). We have the appropriate PTE pages 1136 * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
971 * for each CPU already set up, we just need to hook them in now we know which 1137 * for each CPU already set up, we just need to hook them in now we know which
972 * Guest is about to run on this CPU. */ 1138 * Guest is about to run on this CPU.
1139 */
973void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) 1140void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
974{ 1141{
975 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); 1142 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
@@ -980,30 +1147,38 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
980 pmd_t switcher_pmd; 1147 pmd_t switcher_pmd;
981 pmd_t *pmd_table; 1148 pmd_t *pmd_table;
982 1149
1150 /* FIXME: native_set_pmd is overkill here. */
983 native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> 1151 native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
984 PAGE_SHIFT, PAGE_KERNEL_EXEC)); 1152 PAGE_SHIFT, PAGE_KERNEL_EXEC));
985 1153
1154 /* Figure out where the pmd page is, by reading the PGD, and converting
1155 * it to a virtual address. */
986 pmd_table = __va(pgd_pfn(cpu->lg-> 1156 pmd_table = __va(pgd_pfn(cpu->lg->
987 pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) 1157 pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
988 << PAGE_SHIFT); 1158 << PAGE_SHIFT);
1159 /* Now write it into the shadow page table. */
989 native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); 1160 native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
990#else 1161#else
991 pgd_t switcher_pgd; 1162 pgd_t switcher_pgd;
992 1163
993 /* Make the last PGD entry for this Guest point to the Switcher's PTE 1164 /*
994 * page for this CPU (with appropriate flags). */ 1165 * Make the last PGD entry for this Guest point to the Switcher's PTE
1166 * page for this CPU (with appropriate flags).
1167 */
995 switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); 1168 switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
996 1169
997 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; 1170 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
998 1171
999#endif 1172#endif
1000 /* We also change the Switcher PTE page. When we're running the Guest, 1173 /*
1174 * We also change the Switcher PTE page. When we're running the Guest,
1001 * we want the Guest's "regs" page to appear where the first Switcher 1175 * we want the Guest's "regs" page to appear where the first Switcher
1002 * page for this CPU is. This is an optimization: when the Switcher 1176 * page for this CPU is. This is an optimization: when the Switcher
1003 * saves the Guest registers, it saves them into the first page of this 1177 * saves the Guest registers, it saves them into the first page of this
1004 * CPU's "struct lguest_pages": if we make sure the Guest's register 1178 * CPU's "struct lguest_pages": if we make sure the Guest's register
1005 * page is already mapped there, we don't have to copy them out 1179 * page is already mapped there, we don't have to copy them out
1006 * again. */ 1180 * again.
1181 */
1007 pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; 1182 pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
1008 native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL)); 1183 native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
1009 native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], 1184 native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
@@ -1019,10 +1194,12 @@ static void free_switcher_pte_pages(void)
1019 free_page((long)switcher_pte_page(i)); 1194 free_page((long)switcher_pte_page(i));
1020} 1195}
1021 1196
1022/*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given 1197/*H:520
1198 * Setting up the Switcher PTE page for given CPU is fairly easy, given
1023 * the CPU number and the "struct page"s for the Switcher code itself. 1199 * the CPU number and the "struct page"s for the Switcher code itself.
1024 * 1200 *
1025 * Currently the Switcher is less than a page long, so "pages" is always 1. */ 1201 * Currently the Switcher is less than a page long, so "pages" is always 1.
1202 */
1026static __init void populate_switcher_pte_page(unsigned int cpu, 1203static __init void populate_switcher_pte_page(unsigned int cpu,
1027 struct page *switcher_page[], 1204 struct page *switcher_page[],
1028 unsigned int pages) 1205 unsigned int pages)
@@ -1043,13 +1220,16 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
1043 native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), 1220 native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
1044 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); 1221 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
1045 1222
1046 /* The second page contains the "struct lguest_ro_state", and is 1223 /*
1047 * read-only. */ 1224 * The second page contains the "struct lguest_ro_state", and is
1225 * read-only.
1226 */
1048 native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), 1227 native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
1049 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); 1228 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
1050} 1229}
1051 1230
1052/* We've made it through the page table code. Perhaps our tired brains are 1231/*
1232 * We've made it through the page table code. Perhaps our tired brains are
1053 * still processing the details, or perhaps we're simply glad it's over. 1233 * still processing the details, or perhaps we're simply glad it's over.
1054 * 1234 *
1055 * If nothing else, note that all this complexity in juggling shadow page tables 1235 * If nothing else, note that all this complexity in juggling shadow page tables
@@ -1058,10 +1238,13 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
1058 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD 1238 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
1059 * have implemented shadow page table support directly into hardware. 1239 * have implemented shadow page table support directly into hardware.
1060 * 1240 *
1061 * There is just one file remaining in the Host. */ 1241 * There is just one file remaining in the Host.
1242 */
1062 1243
1063/*H:510 At boot or module load time, init_pagetables() allocates and populates 1244/*H:510
1064 * the Switcher PTE page for each CPU. */ 1245 * At boot or module load time, init_pagetables() allocates and populates
1246 * the Switcher PTE page for each CPU.
1247 */
1065__init int init_pagetables(struct page **switcher_page, unsigned int pages) 1248__init int init_pagetables(struct page **switcher_page, unsigned int pages)
1066{ 1249{
1067 unsigned int i; 1250 unsigned int i;
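Before the diff moves on to segments.c, here is a tiny userspace model of the layout populate_switcher_pte_page() builds above. Everything in it is a stand-in (the MODEL_* flags, model_pte() and the page-frame numbers are invented for this sketch, not kernel definitions); only the shape follows the comments in the hunk: the Switcher code pages are mapped read-only, then each CPU gets a read-write "regs" page followed by a read-only lguest_ro_state page.

#include <stdint.h>
#include <stdio.h>

#define MODEL_PRESENT  0x001	/* placeholder flag values, not _PAGE_* */
#define MODEL_RW       0x002
#define MODEL_ACCESSED 0x020
#define PTES_PER_PAGE  1024	/* one 32-bit, non-PAE page table */

static uint32_t model_pte(uint32_t pfn, uint32_t flags)
{
	return (pfn << 12) | flags;
}

static void model_populate(uint32_t *pte, unsigned int cpu,
			   const uint32_t *switcher_pfn, unsigned int pages)
{
	unsigned int i;

	/* The Switcher code itself: mapped read-only. */
	for (i = 0; i < pages; i++)
		pte[i] = model_pte(switcher_pfn[i],
				   MODEL_PRESENT | MODEL_ACCESSED);

	/* Each CPU owns the pair of pages after the code. */
	i = pages + cpu * 2;

	/* The "regs" page is read-write: the Switcher saves Guest registers
	 * straight into it. */
	pte[i] = model_pte(switcher_pfn[i],
			   MODEL_PRESENT | MODEL_ACCESSED | MODEL_RW);
	/* ...and the lguest_ro_state page stays read-only. */
	pte[i + 1] = model_pte(switcher_pfn[i + 1],
			       MODEL_PRESENT | MODEL_ACCESSED);
}

int main(void)
{
	static uint32_t pte[PTES_PER_PAGE];
	const uint32_t pfn[8] = { 100, 101, 102, 103, 104, 105, 106, 107 };

	model_populate(pte, /*cpu=*/1, pfn, /*pages=*/1);
	printf("code %#x, regs %#x, ro_state %#x\n", pte[0], pte[3], pte[4]);
	return 0;
}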
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 482ed5a18750..951c57b0a7e0 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -1,4 +1,5 @@
1/*P:600 The x86 architecture has segments, which involve a table of descriptors 1/*P:600
2 * The x86 architecture has segments, which involve a table of descriptors
2 * which can be used to do funky things with virtual address interpretation. 3 * which can be used to do funky things with virtual address interpretation.
3 * We originally used to use segments so the Guest couldn't alter the 4 * We originally used to use segments so the Guest couldn't alter the
4 * Guest<->Host Switcher, and then we had to trim Guest segments, and restore 5 * Guest<->Host Switcher, and then we had to trim Guest segments, and restore
@@ -8,7 +9,8 @@
8 * 9 *
9 * In these modern times, the segment handling code consists of simple sanity 10 * In these modern times, the segment handling code consists of simple sanity
10 * checks, and the worst you'll experience reading this code is butterfly-rash 11 * checks, and the worst you'll experience reading this code is butterfly-rash
11 * from frolicking through its parklike serenity. :*/ 12 * from frolicking through its parklike serenity.
13:*/
12#include "lg.h" 14#include "lg.h"
13 15
14/*H:600 16/*H:600
@@ -41,10 +43,12 @@
41 * begin. 43 * begin.
42 */ 44 */
43 45
44/* There are several entries we don't let the Guest set. The TSS entry is the 46/*
47 * There are several entries we don't let the Guest set. The TSS entry is the
45 * "Task State Segment" which controls all kinds of delicate things. The 48 * "Task State Segment" which controls all kinds of delicate things. The
46 * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the 49 * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
47 * Guest can't be trusted to deal with double faults. */ 50 * Guest can't be trusted to deal with double faults.
51 */
48static bool ignored_gdt(unsigned int num) 52static bool ignored_gdt(unsigned int num)
49{ 53{
50 return (num == GDT_ENTRY_TSS 54 return (num == GDT_ENTRY_TSS
@@ -53,42 +57,52 @@ static bool ignored_gdt(unsigned int num)
53 || num == GDT_ENTRY_DOUBLEFAULT_TSS); 57 || num == GDT_ENTRY_DOUBLEFAULT_TSS);
54} 58}
55 59
56/*H:630 Once the Guest gave us new GDT entries, we fix them up a little. We 60/*H:630
61 * Once the Guest gave us new GDT entries, we fix them up a little. We
57 * don't care if they're invalid: the worst that can happen is a General 62 * don't care if they're invalid: the worst that can happen is a General
58 * Protection Fault in the Switcher when it restores a Guest segment register 63 * Protection Fault in the Switcher when it restores a Guest segment register
59 * which tries to use that entry. Then we kill the Guest for causing such a 64 * which tries to use that entry. Then we kill the Guest for causing such a
60 * mess: the message will be "unhandled trap 256". */ 65 * mess: the message will be "unhandled trap 256".
66 */
61static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) 67static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
62{ 68{
63 unsigned int i; 69 unsigned int i;
64 70
65 for (i = start; i < end; i++) { 71 for (i = start; i < end; i++) {
66 /* We never copy these ones to real GDT, so we don't care what 72 /*
67 * they say */ 73 * We never copy these ones to real GDT, so we don't care what
74 * they say
75 */
68 if (ignored_gdt(i)) 76 if (ignored_gdt(i))
69 continue; 77 continue;
70 78
71 /* Segment descriptors contain a privilege level: the Guest is 79 /*
80 * Segment descriptors contain a privilege level: the Guest is
72 * sometimes careless and leaves this as 0, even though it's 81 * sometimes careless and leaves this as 0, even though it's
73 * running at privilege level 1. If so, we fix it here. */ 82 * running at privilege level 1. If so, we fix it here.
83 */
74 if ((cpu->arch.gdt[i].b & 0x00006000) == 0) 84 if ((cpu->arch.gdt[i].b & 0x00006000) == 0)
75 cpu->arch.gdt[i].b |= (GUEST_PL << 13); 85 cpu->arch.gdt[i].b |= (GUEST_PL << 13);
76 86
77 /* Each descriptor has an "accessed" bit. If we don't set it 87 /*
88 * Each descriptor has an "accessed" bit. If we don't set it
78 * now, the CPU will try to set it when the Guest first loads 89 * now, the CPU will try to set it when the Guest first loads
79 * that entry into a segment register. But the GDT isn't 90 * that entry into a segment register. But the GDT isn't
80 * writable by the Guest, so bad things can happen. */ 91 * writable by the Guest, so bad things can happen.
92 */
81 cpu->arch.gdt[i].b |= 0x00000100; 93 cpu->arch.gdt[i].b |= 0x00000100;
82 } 94 }
83} 95}
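The two magic numbers fixup_gdt_table() uses above are just fields of a segment descriptor's high word: 0x00006000 masks the DPL (bits 13-14) and 0x00000100 is the type field's "accessed" bit. A standalone sketch, assuming GUEST_PL is 1 as in lguest and using a made-up descriptor value, applies the same two fixups in isolation:

#include <stdint.h>
#include <stdio.h>

#define GUEST_PL 1	/* the Guest kernel runs at privilege level 1 */

static uint32_t fixup_high_word(uint32_t b)
{
	/* DPL lives in bits 13-14 of the descriptor's high word. */
	if ((b & 0x00006000) == 0)
		b |= (GUEST_PL << 13);

	/* Bit 8 is the "accessed" bit of the type field. */
	b |= 0x00000100;
	return b;
}

int main(void)
{
	/* An arbitrary code-segment high word: DPL 0, not yet accessed. */
	uint32_t b = 0x00c09a00;

	printf("before %#010x, after %#010x\n", b, fixup_high_word(b));
	return 0;
}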
84 96
85/*H:610 Like the IDT, we never simply use the GDT the Guest gives us. We keep 97/*H:610
98 * Like the IDT, we never simply use the GDT the Guest gives us. We keep
86 * a GDT for each CPU, and copy across the Guest's entries each time we want to 99 * a GDT for each CPU, and copy across the Guest's entries each time we want to
87 * run the Guest on that CPU. 100 * run the Guest on that CPU.
88 * 101 *
89 * This routine is called at boot or modprobe time for each CPU to set up the 102 * This routine is called at boot or modprobe time for each CPU to set up the
90 * constant GDT entries: the ones which are the same no matter what Guest we're 103 * constant GDT entries: the ones which are the same no matter what Guest we're
91 * running. */ 104 * running.
105 */
92void setup_default_gdt_entries(struct lguest_ro_state *state) 106void setup_default_gdt_entries(struct lguest_ro_state *state)
93{ 107{
94 struct desc_struct *gdt = state->guest_gdt; 108 struct desc_struct *gdt = state->guest_gdt;
@@ -98,30 +112,37 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
98 gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; 112 gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
99 gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; 113 gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
100 114
101 /* The TSS segment refers to the TSS entry for this particular CPU. 115 /*
116 * The TSS segment refers to the TSS entry for this particular CPU.
102 * Forgive the magic flags: the 0x8900 means the entry is Present, it's 117 * Forgive the magic flags: the 0x8900 means the entry is Present, it's
103 * privilege level 0 Available 386 TSS system segment, and the 0x67 118 * privilege level 0 Available 386 TSS system segment, and the 0x67
104 * means Saturn is eclipsed by Mercury in the twelfth house. */ 119 * means Saturn is eclipsed by Mercury in the twelfth house.
120 */
105 gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); 121 gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
106 gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) 122 gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
107 | ((tss >> 16) & 0x000000FF); 123 | ((tss >> 16) & 0x000000FF);
108} 124}
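For the curious, the "magic flags" joked about above decode quite mundanely: 0x8900 means present, DPL 0, type 9 (an available 386 TSS), and 0x67 is the 104-byte hardware TSS minus one. The sketch below (userspace, with an arbitrary base address; the field positions are the standard i386 system-descriptor layout) builds the entry the same way and pulls the fields back out:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tss = 0x12345678;		/* any base address will do */
	uint32_t a = 0x00000067 | (tss << 16);
	uint32_t b = 0x00008900 | (tss & 0xFF000000)
			       | ((tss >> 16) & 0x000000FF);

	uint32_t limit   = a & 0xFFFF;		/* 0x67: 104-byte TSS - 1 */
	uint32_t base    = (a >> 16) | ((b & 0xFF) << 16) | (b & 0xFF000000);
	uint32_t type    = (b >> 8) & 0xF;	/* 0x9: available 386 TSS */
	uint32_t dpl     = (b >> 13) & 0x3;	/* 0: privilege level 0 */
	uint32_t present = (b >> 15) & 0x1;	/* 1: present */

	printf("base=%#x limit=%#x type=%#x dpl=%u present=%u\n",
	       base, limit, type, dpl, present);
	return 0;
}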
109 125
110/* This routine sets up the initial Guest GDT for booting. All entries start 126/*
111 * as 0 (unusable). */ 127 * This routine sets up the initial Guest GDT for booting. All entries start
128 * as 0 (unusable).
129 */
112void setup_guest_gdt(struct lg_cpu *cpu) 130void setup_guest_gdt(struct lg_cpu *cpu)
113{ 131{
114 /* Start with full 0-4G segments... */ 132 /*
133 * Start with full 0-4G segments...except the Guest is allowed to use
134 * them, so set the privilege level appropriately in the flags.
135 */
115 cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; 136 cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
116 cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; 137 cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
117 /* ...except the Guest is allowed to use them, so set the privilege
118 * level appropriately in the flags. */
119 cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); 138 cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
120 cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); 139 cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
121} 140}
122 141
123/*H:650 An optimization of copy_gdt(), for just the three "thread-local storage" 142/*H:650
124 * entries. */ 143 * An optimization of copy_gdt(), for just the three "thread-local storage"
144 * entries.
145 */
125void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) 146void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
126{ 147{
127 unsigned int i; 148 unsigned int i;
@@ -130,26 +151,34 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
130 gdt[i] = cpu->arch.gdt[i]; 151 gdt[i] = cpu->arch.gdt[i];
131} 152}
132 153
133/*H:640 When the Guest is run on a different CPU, or the GDT entries have 154/*H:640
134 * changed, copy_gdt() is called to copy the Guest's GDT entries across to this 155 * When the Guest is run on a different CPU, or the GDT entries have changed,
135 * CPU's GDT. */ 156 * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's
157 * GDT.
158 */
136void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) 159void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
137{ 160{
138 unsigned int i; 161 unsigned int i;
139 162
140 /* The default entries from setup_default_gdt_entries() are not 163 /*
141 * replaced. See ignored_gdt() above. */ 164 * The default entries from setup_default_gdt_entries() are not
165 * replaced. See ignored_gdt() above.
166 */
142 for (i = 0; i < GDT_ENTRIES; i++) 167 for (i = 0; i < GDT_ENTRIES; i++)
143 if (!ignored_gdt(i)) 168 if (!ignored_gdt(i))
144 gdt[i] = cpu->arch.gdt[i]; 169 gdt[i] = cpu->arch.gdt[i];
145} 170}
146 171
147/*H:620 This is where the Guest asks us to load a new GDT entry 172/*H:620
148 * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. */ 173 * This is where the Guest asks us to load a new GDT entry
174 * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in.
175 */
149void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) 176void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
150{ 177{
151 /* We assume the Guest has the same number of GDT entries as the 178 /*
152 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ 179 * We assume the Guest has the same number of GDT entries as the
180 * Host, otherwise we'd have to dynamically allocate the Guest GDT.
181 */
153 if (num >= ARRAY_SIZE(cpu->arch.gdt)) 182 if (num >= ARRAY_SIZE(cpu->arch.gdt))
154 kill_guest(cpu, "too many gdt entries %i", num); 183 kill_guest(cpu, "too many gdt entries %i", num);
155 184
@@ -157,15 +186,19 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
157 cpu->arch.gdt[num].a = lo; 186 cpu->arch.gdt[num].a = lo;
158 cpu->arch.gdt[num].b = hi; 187 cpu->arch.gdt[num].b = hi;
159 fixup_gdt_table(cpu, num, num+1); 188 fixup_gdt_table(cpu, num, num+1);
160 /* Mark that the GDT changed so the core knows it has to copy it again, 189 /*
161 * even if the Guest is run on the same CPU. */ 190 * Mark that the GDT changed so the core knows it has to copy it again,
191 * even if the Guest is run on the same CPU.
192 */
162 cpu->changed |= CHANGED_GDT; 193 cpu->changed |= CHANGED_GDT;
163} 194}
164 195
165/* This is the fast-track version for just changing the three TLS entries. 196/*
197 * This is the fast-track version for just changing the three TLS entries.
166 * Remember that this happens on every context switch, so it's worth 198 * Remember that this happens on every context switch, so it's worth
167 * optimizing. But wouldn't it be neater to have a single hypercall to cover 199 * optimizing. But wouldn't it be neater to have a single hypercall to cover
168 * both cases? */ 200 * both cases?
201 */
169void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) 202void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
170{ 203{
171 struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; 204 struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
@@ -175,7 +208,6 @@ void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
175 /* Note that just the TLS entries have changed. */ 208 /* Note that just the TLS entries have changed. */
176 cpu->changed |= CHANGED_GDT_TLS; 209 cpu->changed |= CHANGED_GDT_TLS;
177} 210}
178/*:*/
179 211
180/*H:660 212/*H:660
181 * With this, we have finished the Host. 213 * With this, we have finished the Host.
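Before the diff moves on to x86/core.c, a toy model of the CHANGED_GDT versus CHANGED_GDT_TLS split discussed above may help. The structure names, the numeric flag values and the fake GDT contents are invented; the GDT_ENTRY_* indices follow the usual i386 layout, and only the idea mirrors copy_gdt() and copy_gdt_tls(): a context switch dirties just the three TLS slots, so just those three need copying into the per-CPU GDT.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GDT_ENTRIES        32
#define GDT_ENTRY_TLS_MIN  6
#define GDT_ENTRY_TLS_MAX  8

#define CHANGED_GDT      (1 << 0)	/* made-up bit values */
#define CHANGED_GDT_TLS  (1 << 1)

struct model_desc { uint32_t a, b; };

struct model_cpu {
	struct model_desc gdt[GDT_ENTRIES];
	unsigned int changed;
};

/* Slow path: copy the whole table (the real copy_gdt() skips the
 * ignored entries). */
static void model_copy_gdt(const struct model_cpu *cpu, struct model_desc *gdt)
{
	memcpy(gdt, cpu->gdt, sizeof(cpu->gdt));
}

/* Fast path: just the three thread-local storage slots. */
static void model_copy_gdt_tls(const struct model_cpu *cpu,
			       struct model_desc *gdt)
{
	unsigned int i;

	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
		gdt[i] = cpu->gdt[i];
}

int main(void)
{
	static struct model_cpu cpu;
	static struct model_desc host_gdt[GDT_ENTRIES];

	/* Pretend the Guest just made the TLS hypercall. */
	cpu.gdt[GDT_ENTRY_TLS_MIN].a = 0xdeadbeef;
	cpu.changed |= CHANGED_GDT_TLS;

	if (cpu.changed & CHANGED_GDT)
		model_copy_gdt(&cpu, host_gdt);
	else if (cpu.changed & CHANGED_GDT_TLS)
		model_copy_gdt_tls(&cpu, host_gdt);
	cpu.changed = 0;

	printf("TLS slot is now %#x\n", host_gdt[GDT_ENTRY_TLS_MIN].a);
	return 0;
}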
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index eaf722fe309a..6ae388849a3b 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -17,13 +17,15 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20/*P:450 This file contains the x86-specific lguest code. It used to be all 20/*P:450
21 * This file contains the x86-specific lguest code. It used to be all
21 * mixed in with drivers/lguest/core.c but several foolhardy code slashers 22 * mixed in with drivers/lguest/core.c but several foolhardy code slashers
22 * wrestled most of the dependencies out to here in preparation for porting 23 * wrestled most of the dependencies out to here in preparation for porting
23 * lguest to other architectures (see what I mean by foolhardy?). 24 * lguest to other architectures (see what I mean by foolhardy?).
24 * 25 *
25 * This also contains a couple of non-obvious setup and teardown pieces which 26 * This also contains a couple of non-obvious setup and teardown pieces which
26 * were implemented after days of debugging pain. :*/ 27 * were implemented after days of debugging pain.
28:*/
27#include <linux/kernel.h> 29#include <linux/kernel.h>
28#include <linux/start_kernel.h> 30#include <linux/start_kernel.h>
29#include <linux/string.h> 31#include <linux/string.h>
@@ -82,25 +84,33 @@ static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
82 */ 84 */
83static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) 85static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
84{ 86{
85 /* Copying all this data can be quite expensive. We usually run the 87 /*
88 * Copying all this data can be quite expensive. We usually run the
86 * same Guest we ran last time (and that Guest hasn't run anywhere else 89 * same Guest we ran last time (and that Guest hasn't run anywhere else
87 * meanwhile). If that's not the case, we pretend everything in the 90 * meanwhile). If that's not the case, we pretend everything in the
88 * Guest has changed. */ 91 * Guest has changed.
92 */
89 if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { 93 if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
90 __get_cpu_var(last_cpu) = cpu; 94 __get_cpu_var(last_cpu) = cpu;
91 cpu->last_pages = pages; 95 cpu->last_pages = pages;
92 cpu->changed = CHANGED_ALL; 96 cpu->changed = CHANGED_ALL;
93 } 97 }
94 98
95 /* These copies are pretty cheap, so we do them unconditionally: */ 99 /*
96 /* Save the current Host top-level page directory. */ 100 * These copies are pretty cheap, so we do them unconditionally:
101 * Save the current Host top-level page directory.
102 */
97 pages->state.host_cr3 = __pa(current->mm->pgd); 103 pages->state.host_cr3 = __pa(current->mm->pgd);
98 /* Set up the Guest's page tables to see this CPU's pages (and no 104 /*
99 * other CPU's pages). */ 105 * Set up the Guest's page tables to see this CPU's pages (and no
106 * other CPU's pages).
107 */
100 map_switcher_in_guest(cpu, pages); 108 map_switcher_in_guest(cpu, pages);
101 /* Set up the two "TSS" members which tell the CPU what stack to use 109 /*
110 * Set up the two "TSS" members which tell the CPU what stack to use
102 * for traps which go directly into the Guest (ie. traps at privilege 111 * for traps which go directly into the Guest (ie. traps at privilege
103 * level 1). */ 112 * level 1).
113 */
104 pages->state.guest_tss.sp1 = cpu->esp1; 114 pages->state.guest_tss.sp1 = cpu->esp1;
105 pages->state.guest_tss.ss1 = cpu->ss1; 115 pages->state.guest_tss.ss1 = cpu->ss1;
106 116
@@ -125,97 +135,126 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
125 /* This is a dummy value we need for GCC's sake. */ 135 /* This is a dummy value we need for GCC's sake. */
126 unsigned int clobber; 136 unsigned int clobber;
127 137
128 /* Copy the guest-specific information into this CPU's "struct 138 /*
129 * lguest_pages". */ 139 * Copy the guest-specific information into this CPU's "struct
140 * lguest_pages".
141 */
130 copy_in_guest_info(cpu, pages); 142 copy_in_guest_info(cpu, pages);
131 143
132 /* Set the trap number to 256 (impossible value). If we fault while 144 /*
145 * Set the trap number to 256 (impossible value). If we fault while
133 * switching to the Guest (bad segment registers or bug), this will 146 * switching to the Guest (bad segment registers or bug), this will
134 * cause us to abort the Guest. */ 147 * cause us to abort the Guest.
148 */
135 cpu->regs->trapnum = 256; 149 cpu->regs->trapnum = 256;
136 150
137 /* Now: we push the "eflags" register on the stack, then do an "lcall". 151 /*
152 * Now: we push the "eflags" register on the stack, then do an "lcall".
138 * This is how we change from using the kernel code segment to using 153 * This is how we change from using the kernel code segment to using
139 * the dedicated lguest code segment, as well as jumping into the 154 * the dedicated lguest code segment, as well as jumping into the
140 * Switcher. 155 * Switcher.
141 * 156 *
142 * The lcall also pushes the old code segment (KERNEL_CS) onto the 157 * The lcall also pushes the old code segment (KERNEL_CS) onto the
143 * stack, then the address of this call. This stack layout happens to 158 * stack, then the address of this call. This stack layout happens to
144 * exactly match the stack layout created by an interrupt... */ 159 * exactly match the stack layout created by an interrupt...
160 */
145 asm volatile("pushf; lcall *lguest_entry" 161 asm volatile("pushf; lcall *lguest_entry"
146 /* This is how we tell GCC that %eax ("a") and %ebx ("b") 162 /*
147 * are changed by this routine. The "=" means output. */ 163 * This is how we tell GCC that %eax ("a") and %ebx ("b")
164 * are changed by this routine. The "=" means output.
165 */
148 : "=a"(clobber), "=b"(clobber) 166 : "=a"(clobber), "=b"(clobber)
149 /* %eax contains the pages pointer. ("0" refers to the 167 /*
168 * %eax contains the pages pointer. ("0" refers to the
150 * 0-th argument above, ie "a"). %ebx contains the 169 * 0-th argument above, ie "a"). %ebx contains the
151 * physical address of the Guest's top-level page 170 * physical address of the Guest's top-level page
152 * directory. */ 171 * directory.
172 */
153 : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)) 173 : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
154 /* We tell gcc that all these registers could change, 174 /*
175 * We tell gcc that all these registers could change,
155 * which means we don't have to save and restore them in 176 * which means we don't have to save and restore them in
156 * the Switcher. */ 177 * the Switcher.
178 */
157 : "memory", "%edx", "%ecx", "%edi", "%esi"); 179 : "memory", "%edx", "%ecx", "%edi", "%esi");
158} 180}
159/*:*/ 181/*:*/
160 182
161/*M:002 There are hooks in the scheduler which we can register to tell when we 183/*M:002
184 * There are hooks in the scheduler which we can register to tell when we
162 * get kicked off the CPU (preempt_notifier_register()). This would allow us 185 * get kicked off the CPU (preempt_notifier_register()). This would allow us
163 * to lazily disable SYSENTER which would regain some performance, and should 186 * to lazily disable SYSENTER which would regain some performance, and should
164 * also simplify copy_in_guest_info(). Note that we'd still need to restore 187 * also simplify copy_in_guest_info(). Note that we'd still need to restore
165 * things when we exit to Launcher userspace, but that's fairly easy. 188 * things when we exit to Launcher userspace, but that's fairly easy.
166 * 189 *
167 * We could also try using this hooks for PGE, but that might be too expensive. 190 * We could also try using these hooks for PGE, but that might be too expensive.
168 * 191 *
169 * The hooks were designed for KVM, but we can also put them to good use. :*/ 192 * The hooks were designed for KVM, but we can also put them to good use.
193:*/
170 194
171/*H:040 This is the i386-specific code to setup and run the Guest. Interrupts 195/*H:040
172 * are disabled: we own the CPU. */ 196 * This is the i386-specific code to setup and run the Guest. Interrupts
197 * are disabled: we own the CPU.
198 */
173void lguest_arch_run_guest(struct lg_cpu *cpu) 199void lguest_arch_run_guest(struct lg_cpu *cpu)
174{ 200{
175 /* Remember the awfully-named TS bit? If the Guest has asked to set it 201 /*
202 * Remember the awfully-named TS bit? If the Guest has asked to set it
176 * we set it now, so we can trap and pass that trap to the Guest if it 203 * we set it now, so we can trap and pass that trap to the Guest if it
177 * uses the FPU. */ 204 * uses the FPU.
205 */
178 if (cpu->ts) 206 if (cpu->ts)
179 unlazy_fpu(current); 207 unlazy_fpu(current);
180 208
181 /* SYSENTER is an optimized way of doing system calls. We can't allow 209 /*
210 * SYSENTER is an optimized way of doing system calls. We can't allow
182 * it because it always jumps to privilege level 0. A normal Guest 211 * it because it always jumps to privilege level 0. A normal Guest
183 * won't try it because we don't advertise it in CPUID, but a malicious 212 * won't try it because we don't advertise it in CPUID, but a malicious
184 * Guest (or malicious Guest userspace program) could, so we tell the 213 * Guest (or malicious Guest userspace program) could, so we tell the
185 * CPU to disable it before running the Guest. */ 214 * CPU to disable it before running the Guest.
215 */
186 if (boot_cpu_has(X86_FEATURE_SEP)) 216 if (boot_cpu_has(X86_FEATURE_SEP))
187 wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); 217 wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
188 218
189 /* Now we actually run the Guest. It will return when something 219 /*
220 * Now we actually run the Guest. It will return when something
190 * interesting happens, and we can examine its registers to see what it 221 * interesting happens, and we can examine its registers to see what it
191 * was doing. */ 222 * was doing.
223 */
192 run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); 224 run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));
193 225
194 /* Note that the "regs" structure contains two extra entries which are 226 /*
227 * Note that the "regs" structure contains two extra entries which are
195 * not really registers: a trap number which says what interrupt or 228 * not really registers: a trap number which says what interrupt or
196 * trap made the switcher code come back, and an error code which some 229 * trap made the switcher code come back, and an error code which some
197 * traps set. */ 230 * traps set.
231 */
198 232
199 /* Restore SYSENTER if it's supposed to be on. */ 233 /* Restore SYSENTER if it's supposed to be on. */
200 if (boot_cpu_has(X86_FEATURE_SEP)) 234 if (boot_cpu_has(X86_FEATURE_SEP))
201 wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); 235 wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
202 236
203 /* If the Guest page faulted, then the cr2 register will tell us the 237 /*
238 * If the Guest page faulted, then the cr2 register will tell us the
204 * bad virtual address. We have to grab this now, because once we 239 * bad virtual address. We have to grab this now, because once we
205 * re-enable interrupts an interrupt could fault and thus overwrite 240 * re-enable interrupts an interrupt could fault and thus overwrite
206 * cr2, or we could even move off to a different CPU. */ 241 * cr2, or we could even move off to a different CPU.
242 */
207 if (cpu->regs->trapnum == 14) 243 if (cpu->regs->trapnum == 14)
208 cpu->arch.last_pagefault = read_cr2(); 244 cpu->arch.last_pagefault = read_cr2();
209 /* Similarly, if we took a trap because the Guest used the FPU, 245 /*
246 * Similarly, if we took a trap because the Guest used the FPU,
210 * we have to restore the FPU it expects to see. 247 * we have to restore the FPU it expects to see.
211 * math_state_restore() may sleep and we may even move off to 248 * math_state_restore() may sleep and we may even move off to
212 * a different CPU. So all the critical stuff should be done 249 * a different CPU. So all the critical stuff should be done
213 * before this. */ 250 * before this.
251 */
214 else if (cpu->regs->trapnum == 7) 252 else if (cpu->regs->trapnum == 7)
215 math_state_restore(); 253 math_state_restore();
216} 254}
217 255
218/*H:130 Now we've examined the hypercall code; our Guest can make requests. 256/*H:130
257 * Now we've examined the hypercall code; our Guest can make requests.
219 * Our Guest is usually so well behaved; it never tries to do things it isn't 258 * Our Guest is usually so well behaved; it never tries to do things it isn't
220 * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual 259 * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual
221 * infrastructure isn't quite complete, because it doesn't contain replacements 260 * infrastructure isn't quite complete, because it doesn't contain replacements
@@ -225,26 +264,33 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
225 * 264 *
226 * When the Guest uses one of these instructions, we get a trap (General 265 * When the Guest uses one of these instructions, we get a trap (General
227 * Protection Fault) and come here. We see if it's one of those troublesome 266 * Protection Fault) and come here. We see if it's one of those troublesome
228 * instructions and skip over it. We return true if we did. */ 267 * instructions and skip over it. We return true if we did.
268 */
229static int emulate_insn(struct lg_cpu *cpu) 269static int emulate_insn(struct lg_cpu *cpu)
230{ 270{
231 u8 insn; 271 u8 insn;
232 unsigned int insnlen = 0, in = 0, shift = 0; 272 unsigned int insnlen = 0, in = 0, shift = 0;
233 /* The eip contains the *virtual* address of the Guest's instruction: 273 /*
234 * guest_pa just subtracts the Guest's page_offset. */ 274 * The eip contains the *virtual* address of the Guest's instruction:
275 * guest_pa just subtracts the Guest's page_offset.
276 */
235 unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); 277 unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
236 278
237 /* This must be the Guest kernel trying to do something, not userspace! 279 /*
280 * This must be the Guest kernel trying to do something, not userspace!
238 * The bottom two bits of the CS segment register are the privilege 281 * The bottom two bits of the CS segment register are the privilege
239 * level. */ 282 * level.
283 */
240 if ((cpu->regs->cs & 3) != GUEST_PL) 284 if ((cpu->regs->cs & 3) != GUEST_PL)
241 return 0; 285 return 0;
242 286
243 /* Decoding x86 instructions is icky. */ 287 /* Decoding x86 instructions is icky. */
244 insn = lgread(cpu, physaddr, u8); 288 insn = lgread(cpu, physaddr, u8);
245 289
246 /* 0x66 is an "operand prefix". It means it's using the upper 16 bits 290 /*
247 of the eax register. */ 291 * 0x66 is an "operand prefix". It means it's using the upper 16 bits
292 * of the eax register.
293 */
248 if (insn == 0x66) { 294 if (insn == 0x66) {
249 shift = 16; 295 shift = 16;
250 /* The instruction is 1 byte so far, read the next byte. */ 296 /* The instruction is 1 byte so far, read the next byte. */
@@ -252,8 +298,10 @@ static int emulate_insn(struct lg_cpu *cpu)
252 insn = lgread(cpu, physaddr + insnlen, u8); 298 insn = lgread(cpu, physaddr + insnlen, u8);
253 } 299 }
254 300
255 /* We can ignore the lower bit for the moment and decode the 4 opcodes 301 /*
256 * we need to emulate. */ 302 * We can ignore the lower bit for the moment and decode the 4 opcodes
303 * we need to emulate.
304 */
257 switch (insn & 0xFE) { 305 switch (insn & 0xFE) {
258 case 0xE4: /* in <next byte>,%al */ 306 case 0xE4: /* in <next byte>,%al */
259 insnlen += 2; 307 insnlen += 2;
@@ -274,9 +322,11 @@ static int emulate_insn(struct lg_cpu *cpu)
274 return 0; 322 return 0;
275 } 323 }
276 324
277 /* If it was an "IN" instruction, they expect the result to be read 325 /*
326 * If it was an "IN" instruction, they expect the result to be read
278 * into %eax, so we change %eax. We always return all-ones, which 327 * into %eax, so we change %eax. We always return all-ones, which
279 * traditionally means "there's nothing there". */ 328 * traditionally means "there's nothing there".
329 */
280 if (in) { 330 if (in) {
281 /* Lower bit tells us whether it's a 16 or 32 bit access */ 331 /* Lower bit tells us whether it's a 16 or 32 bit access */
282 if (insn & 0x1) 332 if (insn & 0x1)
@@ -290,7 +340,8 @@ static int emulate_insn(struct lg_cpu *cpu)
290 return 1; 340 return 1;
291} 341}
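The opcode checks in emulate_insn() above are plain x86 encodings: 0x66 is the operand-size prefix, and the low bit of 0xE4/0xE6/0xEC/0xEE selects the byte form versus the word/long form, which is why the switch masks with 0xFE. Below is a self-contained sketch of just the decode step (the struct and helper names are invented, and the part where the Host fakes the result of an IN is left out):

#include <stdint.h>
#include <stdio.h>

struct model_decode {
	int handled;	/* one of the four IN/OUT forms we emulate? */
	int is_in;	/* IN (must fake a result) or OUT (just skipped)? */
	int len;	/* bytes to skip so the Guest can continue */
};

static struct model_decode decode_inout(const uint8_t *bytes)
{
	struct model_decode d = { 0, 0, 0 };
	uint8_t insn = bytes[0];

	/* 0x66 is the operand-size prefix: 16-bit instead of 32-bit. */
	if (insn == 0x66) {
		d.len = 1;
		insn = bytes[1];
	}

	switch (insn & 0xFE) {		/* low bit: byte vs word/long */
	case 0xE4:	/* in  <imm8>,%al or %(e)ax */
		d.handled = 1; d.is_in = 1; d.len += 2;
		break;
	case 0xEC:	/* in  (%dx),%al or %(e)ax */
		d.handled = 1; d.is_in = 1; d.len += 1;
		break;
	case 0xE6:	/* out %al or %(e)ax,<imm8> */
		d.handled = 1; d.len += 2;
		break;
	case 0xEE:	/* out %al or %(e)ax,(%dx) */
		d.handled = 1; d.len += 1;
		break;
	}
	return d;
}

int main(void)
{
	/* "out %ax,$0x71": prefix + opcode + immediate = 3 bytes. */
	const uint8_t bytes[] = { 0x66, 0xE7, 0x71 };
	struct model_decode d = decode_inout(bytes);

	printf("handled=%d is_in=%d skip=%d bytes\n", d.handled, d.is_in, d.len);
	return 0;
}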
292 342
293/* Our hypercalls mechanism used to be based on direct software interrupts. 343/*
344 * Our hypercalls mechanism used to be based on direct software interrupts.
294 * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to 345 * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to
295 * change over to using kvm hypercalls. 346 * change over to using kvm hypercalls.
296 * 347 *
@@ -318,16 +369,20 @@ static int emulate_insn(struct lg_cpu *cpu)
318 */ 369 */
319static void rewrite_hypercall(struct lg_cpu *cpu) 370static void rewrite_hypercall(struct lg_cpu *cpu)
320{ 371{
321 /* These are the opcodes we use to patch the Guest. The opcode for "int 372 /*
373 * These are the opcodes we use to patch the Guest. The opcode for "int
322 * $0x1f" is "0xcd 0x1f" but the vmcall instruction is 3 bytes long, so we 374 * $0x1f" is "0xcd 0x1f" but the vmcall instruction is 3 bytes long, so we
323 * complete the sequence with a NOP (0x90). */ 375 * complete the sequence with a NOP (0x90).
376 */
324 u8 insn[3] = {0xcd, 0x1f, 0x90}; 377 u8 insn[3] = {0xcd, 0x1f, 0x90};
325 378
326 __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); 379 __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
327 /* The above write might have caused a copy of that page to be made 380 /*
381 * The above write might have caused a copy of that page to be made
328 * (if it was read-only). We need to make sure the Guest has 382 * (if it was read-only). We need to make sure the Guest has
329 * up-to-date pagetables. As this doesn't happen often, we can just 383 * up-to-date pagetables. As this doesn't happen often, we can just
330 * drop them all. */ 384 * drop them all.
385 */
331 guest_pagetable_clear_all(cpu); 386 guest_pagetable_clear_all(cpu);
332} 387}
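A concrete footnote to rewrite_hypercall(): Intel's VMCALL encodes as 0x0f 0x01 0xc1, three bytes (AMD's VMMCALL differs only in the last byte), which is why the two-byte "int $0x1f" replacement needs a trailing NOP to keep the instruction stream the same length. A trivial sketch, patching a local buffer instead of Guest memory:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t text[3] = { 0x0f, 0x01, 0xc1 };		/* vmcall */
	const uint8_t patch[3] = { 0xcd, 0x1f, 0x90 };	/* int $0x1f; nop */

	memcpy(text, patch, sizeof(patch));
	printf("patched to: %02x %02x %02x\n", text[0], text[1], text[2]);
	return 0;
}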
333 388
@@ -335,9 +390,11 @@ static bool is_hypercall(struct lg_cpu *cpu)
335{ 390{
336 u8 insn[3]; 391 u8 insn[3];
337 392
338 /* This must be the Guest kernel trying to do something. 393 /*
394 * This must be the Guest kernel trying to do something.
339 * The bottom two bits of the CS segment register are the privilege 395 * The bottom two bits of the CS segment register are the privilege
340 * level. */ 396 * level.
397 */
341 if ((cpu->regs->cs & 3) != GUEST_PL) 398 if ((cpu->regs->cs & 3) != GUEST_PL)
342 return false; 399 return false;
343 400
@@ -351,86 +408,105 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
351{ 408{
352 switch (cpu->regs->trapnum) { 409 switch (cpu->regs->trapnum) {
353 case 13: /* We've intercepted a General Protection Fault. */ 410 case 13: /* We've intercepted a General Protection Fault. */
354 /* Check if this was one of those annoying IN or OUT 411 /*
412 * Check if this was one of those annoying IN or OUT
355 * instructions which we need to emulate. If so, we just go 413 * instructions which we need to emulate. If so, we just go
356 * back into the Guest after we've done it. */ 414 * back into the Guest after we've done it.
415 */
357 if (cpu->regs->errcode == 0) { 416 if (cpu->regs->errcode == 0) {
358 if (emulate_insn(cpu)) 417 if (emulate_insn(cpu))
359 return; 418 return;
360 } 419 }
361 /* If KVM is active, the vmcall instruction triggers a 420 /*
362 * General Protection Fault. Normally it triggers an 421 * If KVM is active, the vmcall instruction triggers a General
363 * invalid opcode fault (6): */ 422 * Protection Fault. Normally it triggers an invalid opcode
423 * fault (6):
424 */
364 case 6: 425 case 6:
365 /* We need to check if ring == GUEST_PL and 426 /*
366 * faulting instruction == vmcall. */ 427 * We need to check if ring == GUEST_PL and faulting
428 * instruction == vmcall.
429 */
367 if (is_hypercall(cpu)) { 430 if (is_hypercall(cpu)) {
368 rewrite_hypercall(cpu); 431 rewrite_hypercall(cpu);
369 return; 432 return;
370 } 433 }
371 break; 434 break;
372 case 14: /* We've intercepted a Page Fault. */ 435 case 14: /* We've intercepted a Page Fault. */
373 /* The Guest accessed a virtual address that wasn't mapped. 436 /*
437 * The Guest accessed a virtual address that wasn't mapped.
374 * This happens a lot: we don't actually set up most of the page 438 * This happens a lot: we don't actually set up most of the page
375 * tables for the Guest at all when we start: as it runs it asks 439 * tables for the Guest at all when we start: as it runs it asks
376 * for more and more, and we set them up as required. In this 440 * for more and more, and we set them up as required. In this
377 * case, we don't even tell the Guest that the fault happened. 441 * case, we don't even tell the Guest that the fault happened.
378 * 442 *
379 * The errcode tells whether this was a read or a write, and 443 * The errcode tells whether this was a read or a write, and
380 * whether kernel or userspace code. */ 444 * whether kernel or userspace code.
445 */
381 if (demand_page(cpu, cpu->arch.last_pagefault, 446 if (demand_page(cpu, cpu->arch.last_pagefault,
382 cpu->regs->errcode)) 447 cpu->regs->errcode))
383 return; 448 return;
384 449
385 /* OK, it's really not there (or not OK): the Guest needs to 450 /*
451 * OK, it's really not there (or not OK): the Guest needs to
386 * know. We write out the cr2 value so it knows where the 452 * know. We write out the cr2 value so it knows where the
387 * fault occurred. 453 * fault occurred.
388 * 454 *
389 * Note that if the Guest were really messed up, this could 455 * Note that if the Guest were really messed up, this could
390 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so 456 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
391 * lg->lguest_data could be NULL */ 457 * lg->lguest_data could be NULL
458 */
392 if (cpu->lg->lguest_data && 459 if (cpu->lg->lguest_data &&
393 put_user(cpu->arch.last_pagefault, 460 put_user(cpu->arch.last_pagefault,
394 &cpu->lg->lguest_data->cr2)) 461 &cpu->lg->lguest_data->cr2))
395 kill_guest(cpu, "Writing cr2"); 462 kill_guest(cpu, "Writing cr2");
396 break; 463 break;
397 case 7: /* We've intercepted a Device Not Available fault. */ 464 case 7: /* We've intercepted a Device Not Available fault. */
398 /* If the Guest doesn't want to know, we already restored the 465 /*
399 * Floating Point Unit, so we just continue without telling 466 * If the Guest doesn't want to know, we already restored the
400 * it. */ 467 * Floating Point Unit, so we just continue without telling it.
468 */
401 if (!cpu->ts) 469 if (!cpu->ts)
402 return; 470 return;
403 break; 471 break;
404 case 32 ... 255: 472 case 32 ... 255:
405 /* These values mean a real interrupt occurred, in which case 473 /*
474 * These values mean a real interrupt occurred, in which case
406 * the Host handler has already been run. We just do a 475 * the Host handler has already been run. We just do a
407 * friendly check if another process should now be run, then 476 * friendly check if another process should now be run, then
408 * return to run the Guest again */ 477 * return to run the Guest again
478 */
409 cond_resched(); 479 cond_resched();
410 return; 480 return;
411 case LGUEST_TRAP_ENTRY: 481 case LGUEST_TRAP_ENTRY:
412 /* Our 'struct hcall_args' maps directly over our regs: we set 482 /*
413 * up the pointer now to indicate a hypercall is pending. */ 483 * Our 'struct hcall_args' maps directly over our regs: we set
484 * up the pointer now to indicate a hypercall is pending.
485 */
414 cpu->hcall = (struct hcall_args *)cpu->regs; 486 cpu->hcall = (struct hcall_args *)cpu->regs;
415 return; 487 return;
416 } 488 }
417 489
418 /* We didn't handle the trap, so it needs to go to the Guest. */ 490 /* We didn't handle the trap, so it needs to go to the Guest. */
419 if (!deliver_trap(cpu, cpu->regs->trapnum)) 491 if (!deliver_trap(cpu, cpu->regs->trapnum))
420 /* If the Guest doesn't have a handler (either it hasn't 492 /*
493 * If the Guest doesn't have a handler (either it hasn't
421 * registered any yet, or it's one of the faults we don't let 494 * registered any yet, or it's one of the faults we don't let
422 * it handle), it dies with this cryptic error message. */ 495 * it handle), it dies with this cryptic error message.
496 */
423 kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)", 497 kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
424 cpu->regs->trapnum, cpu->regs->eip, 498 cpu->regs->trapnum, cpu->regs->eip,
425 cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault 499 cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
426 : cpu->regs->errcode); 500 : cpu->regs->errcode);
427} 501}
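One detail the page-fault case above leans on: cpu->regs->errcode is the raw x86 page-fault error code, and its low bits carry exactly the "read or write, kernel or userspace" information handed to demand_page(). A minimal decoder (userspace, illustrative value only):

#include <stdint.h>
#include <stdio.h>

static void decode_pf_errcode(uint32_t err)
{
	printf("%s %s to a %s page\n",
	       (err & 4) ? "user-mode" : "kernel-mode",
	       (err & 2) ? "write" : "read",
	       (err & 1) ? "present (protection fault)" : "not-present");
}

int main(void)
{
	/* 0x6: a user-mode write to a page that was not mapped. */
	decode_pf_errcode(0x6);
	return 0;
}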
428 502
429/* Now we can look at each of the routines this calls, in increasing order of 503/*
504 * Now we can look at each of the routines this calls, in increasing order of
430 * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), 505 * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
431 * deliver_trap() and demand_page(). After all those, we'll be ready to 506 * deliver_trap() and demand_page(). After all those, we'll be ready to
432 * examine the Switcher, and our philosophical understanding of the Host/Guest 507 * examine the Switcher, and our philosophical understanding of the Host/Guest
433 * duality will be complete. :*/ 508 * duality will be complete.
509:*/
434static void adjust_pge(void *on) 510static void adjust_pge(void *on)
435{ 511{
436 if (on) 512 if (on)
@@ -439,13 +515,16 @@ static void adjust_pge(void *on)
439 write_cr4(read_cr4() & ~X86_CR4_PGE); 515 write_cr4(read_cr4() & ~X86_CR4_PGE);
440} 516}
441 517
442/*H:020 Now the Switcher is mapped and everything else is ready, we need to do 518/*H:020
443 * some more i386-specific initialization. */ 519 * Now the Switcher is mapped and everything else is ready, we need to do
520 * some more i386-specific initialization.
521 */
444void __init lguest_arch_host_init(void) 522void __init lguest_arch_host_init(void)
445{ 523{
446 int i; 524 int i;
447 525
448 /* Most of the i386/switcher.S doesn't care that it's been moved; on 526 /*
527 * Most of the i386/switcher.S doesn't care that it's been moved; on
449 * Intel, jumps are relative, and it doesn't access any references to 528 * Intel, jumps are relative, and it doesn't access any references to
450 * external code or data. 529 * external code or data.
451 * 530 *
@@ -453,7 +532,8 @@ void __init lguest_arch_host_init(void)
453 * addresses are placed in a table (default_idt_entries), so we need to 532 * addresses are placed in a table (default_idt_entries), so we need to
454 * update the table with the new addresses. switcher_offset() is a 533 * update the table with the new addresses. switcher_offset() is a
455 * convenience function which returns the distance between the 534 * convenience function which returns the distance between the
456 * compiled-in switcher code and the high-mapped copy we just made. */ 535 * compiled-in switcher code and the high-mapped copy we just made.
536 */
457 for (i = 0; i < IDT_ENTRIES; i++) 537 for (i = 0; i < IDT_ENTRIES; i++)
458 default_idt_entries[i] += switcher_offset(); 538 default_idt_entries[i] += switcher_offset();
459 539
@@ -468,63 +548,81 @@ void __init lguest_arch_host_init(void)
468 for_each_possible_cpu(i) { 548 for_each_possible_cpu(i) {
469 /* lguest_pages() returns this CPU's two pages. */ 549 /* lguest_pages() returns this CPU's two pages. */
470 struct lguest_pages *pages = lguest_pages(i); 550 struct lguest_pages *pages = lguest_pages(i);
471 /* This is a convenience pointer to make the code fit one 551 /* This is a convenience pointer to make the code neater. */
472 * statement to a line. */
473 struct lguest_ro_state *state = &pages->state; 552 struct lguest_ro_state *state = &pages->state;
474 553
475 /* The Global Descriptor Table: the Host has a different one 554 /*
555 * The Global Descriptor Table: the Host has a different one
476 * for each CPU. We keep a descriptor for the GDT which says 556 * for each CPU. We keep a descriptor for the GDT which says
477 * where it is and how big it is (the size is actually the last 557 * where it is and how big it is (the size is actually the last
478 * byte, not the size, hence the "-1"). */ 558 * byte, not the size, hence the "-1").
559 */
479 state->host_gdt_desc.size = GDT_SIZE-1; 560 state->host_gdt_desc.size = GDT_SIZE-1;
480 state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); 561 state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
481 562
482 /* All CPUs on the Host use the same Interrupt Descriptor 563 /*
564 * All CPUs on the Host use the same Interrupt Descriptor
483 * Table, so we just use store_idt(), which gets this CPU's IDT 565 * Table, so we just use store_idt(), which gets this CPU's IDT
484 * descriptor. */ 566 * descriptor.
567 */
485 store_idt(&state->host_idt_desc); 568 store_idt(&state->host_idt_desc);
486 569
487 /* The descriptors for the Guest's GDT and IDT can be filled 570 /*
571 * The descriptors for the Guest's GDT and IDT can be filled
488 * out now, too. We copy the GDT & IDT into ->guest_gdt and 572 * out now, too. We copy the GDT & IDT into ->guest_gdt and
489 * ->guest_idt before actually running the Guest. */ 573 * ->guest_idt before actually running the Guest.
574 */
490 state->guest_idt_desc.size = sizeof(state->guest_idt)-1; 575 state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
491 state->guest_idt_desc.address = (long)&state->guest_idt; 576 state->guest_idt_desc.address = (long)&state->guest_idt;
492 state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; 577 state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
493 state->guest_gdt_desc.address = (long)&state->guest_gdt; 578 state->guest_gdt_desc.address = (long)&state->guest_gdt;
494 579
495 /* We know where we want the stack to be when the Guest enters 580 /*
581 * We know where we want the stack to be when the Guest enters
496 * the Switcher: in pages->regs. The stack grows upwards, so 582 * the Switcher: in pages->regs. The stack grows upwards, so
497 * we start it at the end of that structure. */ 583 * we start it at the end of that structure.
584 */
498 state->guest_tss.sp0 = (long)(&pages->regs + 1); 585 state->guest_tss.sp0 = (long)(&pages->regs + 1);
499 /* And this is the GDT entry to use for the stack: we keep a 586 /*
500 * couple of special LGUEST entries. */ 587 * And this is the GDT entry to use for the stack: we keep a
588 * couple of special LGUEST entries.
589 */
501 state->guest_tss.ss0 = LGUEST_DS; 590 state->guest_tss.ss0 = LGUEST_DS;
502 591
503 /* x86 can have a fine-grained bitmap which indicates what I/O 592 /*
593 * x86 can have a fine-grained bitmap which indicates what I/O
504 * ports the process can use. We set it to the end of our 594 * ports the process can use. We set it to the end of our
505 * structure, meaning "none". */ 595 * structure, meaning "none".
596 */
506 state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); 597 state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
507 598
508 /* Some GDT entries are the same across all Guests, so we can 599 /*
509 * set them up now. */ 600 * Some GDT entries are the same across all Guests, so we can
601 * set them up now.
602 */
510 setup_default_gdt_entries(state); 603 setup_default_gdt_entries(state);
511 /* Most IDT entries are the same for all Guests, too.*/ 604 /* Most IDT entries are the same for all Guests, too.*/
512 setup_default_idt_entries(state, default_idt_entries); 605 setup_default_idt_entries(state, default_idt_entries);
513 606
514 /* The Host needs to be able to use the LGUEST segments on this 607 /*
515 * CPU, too, so put them in the Host GDT. */ 608 * The Host needs to be able to use the LGUEST segments on this
609 * CPU, too, so put them in the Host GDT.
610 */
516 get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; 611 get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
517 get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; 612 get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
518 } 613 }
519 614
520 /* In the Switcher, we want the %cs segment register to use the 615 /*
616 * In the Switcher, we want the %cs segment register to use the
521 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so 617 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
522 * it will be undisturbed when we switch. To change %cs and jump we 618 * it will be undisturbed when we switch. To change %cs and jump we
523 * need this structure to feed to Intel's "lcall" instruction. */ 619 * need this structure to feed to Intel's "lcall" instruction.
620 */
524 lguest_entry.offset = (long)switch_to_guest + switcher_offset(); 621 lguest_entry.offset = (long)switch_to_guest + switcher_offset();
525 lguest_entry.segment = LGUEST_CS; 622 lguest_entry.segment = LGUEST_CS;
526 623
527 /* Finally, we need to turn off "Page Global Enable". PGE is an 624 /*
625 * Finally, we need to turn off "Page Global Enable". PGE is an
528 * optimization where page table entries are specially marked to show 626 * optimization where page table entries are specially marked to show
529 * they never change. The Host kernel marks all the kernel pages this 627 * they never change. The Host kernel marks all the kernel pages this
530 * way because it's always present, even when userspace is running. 628 * way because it's always present, even when userspace is running.
@@ -534,16 +632,21 @@ void __init lguest_arch_host_init(void)
534 * you'll get really weird bugs that you'll chase for two days. 632 * you'll get really weird bugs that you'll chase for two days.
535 * 633 *
536 * I used to turn PGE off every time we switched to the Guest and back 634 * I used to turn PGE off every time we switched to the Guest and back
537 * on when we return, but that slowed the Switcher down noticeably. */ 635 * on when we return, but that slowed the Switcher down noticeably.
636 */
538 637
539 /* We don't need the complexity of CPUs coming and going while we're 638 /*
540 * doing this. */ 639 * We don't need the complexity of CPUs coming and going while we're
640 * doing this.
641 */
541 get_online_cpus(); 642 get_online_cpus();
542 if (cpu_has_pge) { /* We have a broader idea of "global". */ 643 if (cpu_has_pge) { /* We have a broader idea of "global". */
543 /* Remember that this was originally set (for cleanup). */ 644 /* Remember that this was originally set (for cleanup). */
544 cpu_had_pge = 1; 645 cpu_had_pge = 1;
545 /* adjust_pge is a helper function which sets or unsets the PGE 646 /*
546 * bit on its CPU, depending on the argument (0 == unset). */ 647 * adjust_pge is a helper function which sets or unsets the PGE
648 * bit on its CPU, depending on the argument (0 == unset).
649 */
547 on_each_cpu(adjust_pge, (void *)0, 1); 650 on_each_cpu(adjust_pge, (void *)0, 1);
548 /* Turn off the feature in the global feature set. */ 651 /* Turn off the feature in the global feature set. */
549 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); 652 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
@@ -590,26 +693,32 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
590{ 693{
591 u32 tsc_speed; 694 u32 tsc_speed;
592 695
593 /* The pointer to the Guest's "struct lguest_data" is the only argument. 696 /*
594 * We check that address now. */ 697 * The pointer to the Guest's "struct lguest_data" is the only argument.
698 * We check that address now.
699 */
595 if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, 700 if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
596 sizeof(*cpu->lg->lguest_data))) 701 sizeof(*cpu->lg->lguest_data)))
597 return -EFAULT; 702 return -EFAULT;
598 703
599 /* Having checked it, we simply set lg->lguest_data to point straight 704 /*
705 * Having checked it, we simply set lg->lguest_data to point straight
600 * into the Launcher's memory at the right place and then use 706 * into the Launcher's memory at the right place and then use
601 * copy_to_user/from_user from now on, instead of lgread/write. I put 707 * copy_to_user/from_user from now on, instead of lgread/write. I put
602 * this in to show that I'm not immune to writing stupid 708 * this in to show that I'm not immune to writing stupid
603 * optimizations. */ 709 * optimizations.
710 */
604 cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1; 711 cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;
605 712
606 /* We insist that the Time Stamp Counter exist and doesn't change with 713 /*
714 * We insist that the Time Stamp Counter exist and doesn't change with
607 * cpu frequency. Some devious chip manufacturers decided that TSC 715 * cpu frequency. Some devious chip manufacturers decided that TSC
608 * changes could be handled in software. I decided that time going 716 * changes could be handled in software. I decided that time going
609 * backwards might be good for benchmarks, but it's bad for users. 717 * backwards might be good for benchmarks, but it's bad for users.
610 * 718 *
611 * We also insist that the TSC be stable: the kernel detects unreliable 719 * We also insist that the TSC be stable: the kernel detects unreliable
612 * TSCs for its own purposes, and we use that here. */ 720 * TSCs for its own purposes, and we use that here.
721 */
613 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable()) 722 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
614 tsc_speed = tsc_khz; 723 tsc_speed = tsc_khz;
615 else 724 else
@@ -625,38 +734,47 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
625} 734}
626/*:*/ 735/*:*/
627 736
628/*L:030 lguest_arch_setup_regs() 737/*L:030
738 * lguest_arch_setup_regs()
629 * 739 *
630 * Most of the Guest's registers are left alone: we used get_zeroed_page() to 740 * Most of the Guest's registers are left alone: we used get_zeroed_page() to
631 * allocate the structure, so they will be 0. */ 741 * allocate the structure, so they will be 0.
742 */
632void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) 743void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
633{ 744{
634 struct lguest_regs *regs = cpu->regs; 745 struct lguest_regs *regs = cpu->regs;
635 746
636 /* There are four "segment" registers which the Guest needs to boot: 747 /*
748 * There are four "segment" registers which the Guest needs to boot:
637 * The "code segment" register (cs) refers to the kernel code segment 749 * The "code segment" register (cs) refers to the kernel code segment
638 * __KERNEL_CS, and the "data", "extra" and "stack" segment registers 750 * __KERNEL_CS, and the "data", "extra" and "stack" segment registers
639 * refer to the kernel data segment __KERNEL_DS. 751 * refer to the kernel data segment __KERNEL_DS.
640 * 752 *
641 * The privilege level is packed into the lower bits. The Guest runs 753 * The privilege level is packed into the lower bits. The Guest runs
642 * at privilege level 1 (GUEST_PL).*/ 754 * at privilege level 1 (GUEST_PL).
755 */
643 regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; 756 regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
644 regs->cs = __KERNEL_CS|GUEST_PL; 757 regs->cs = __KERNEL_CS|GUEST_PL;
645 758
646 /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002) 759 /*
760 * The "eflags" register contains miscellaneous flags. Bit 1 (0x002)
647 * is supposed to always be "1". Bit 9 (0x200) controls whether 761 * is supposed to always be "1". Bit 9 (0x200) controls whether
648 * interrupts are enabled. We always leave interrupts enabled while 762 * interrupts are enabled. We always leave interrupts enabled while
649 * running the Guest. */ 763 * running the Guest.
764 */
650 regs->eflags = X86_EFLAGS_IF | 0x2; 765 regs->eflags = X86_EFLAGS_IF | 0x2;
651 766
652 /* The "Extended Instruction Pointer" register says where the Guest is 767 /*
653 * running. */ 768 * The "Extended Instruction Pointer" register says where the Guest is
769 * running.
770 */
654 regs->eip = start; 771 regs->eip = start;
655 772
656 /* %esi points to our boot information, at physical address 0, so don't 773 /*
657 * touch it. */ 774 * %esi points to our boot information, at physical address 0, so don't
775 * touch it.
776 */
658 777
659 /* There are a couple of GDT entries the Guest expects when first 778 /* There are a couple of GDT entries the Guest expects at boot. */
660 * booting. */
661 setup_guest_gdt(cpu); 779 setup_guest_gdt(cpu);
662} 780}
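As a worked example of the selector packing in lguest_arch_setup_regs(): the low two bits of an x86 segment selector are the requested privilege level, so OR-ing in GUEST_PL keeps the descriptor index and only lowers the privilege. Assuming the usual 32-bit values __KERNEL_CS = 0x60, __KERNEL_DS = 0x68 and GUEST_PL = 1 (these exact numbers are an assumption, not taken from the patch), the boot-time register image decodes as:

	regs->cs     = 0x60 | 1;	/* 0x61: kernel code descriptor, RPL 1 */
	regs->ds     = 0x68 | 1;	/* 0x69: kernel data descriptor, RPL 1 */
	regs->eflags = 0x200 | 0x2;	/* 0x202: IF (bit 9) plus the always-set bit 1 */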
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
index 3fc15318a80f..40634b0db9f7 100644
--- a/drivers/lguest/x86/switcher_32.S
+++ b/drivers/lguest/x86/switcher_32.S
@@ -1,12 +1,15 @@
1/*P:900 This is the Switcher: code which sits at 0xFFC00000 astride both the 1/*P:900
2 * Host and Guest to do the low-level Guest<->Host switch. It is as simple as 2 * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride
3 * it can be made, but it's naturally very specific to x86. 3 * both the Host and Guest to do the low-level Guest<->Host switch. It is as
4 * simple as it can be made, but it's naturally very specific to x86.
4 * 5 *
5 * You have now completed Preparation. If this has whet your appetite; if you 6 * You have now completed Preparation. If this has whet your appetite; if you
6 * are feeling invigorated and refreshed then the next, more challenging stage 7 * are feeling invigorated and refreshed then the next, more challenging stage
7 * can be found in "make Guest". :*/ 8 * can be found in "make Guest".
9 :*/
8 10
9/*M:012 Lguest is meant to be simple: my rule of thumb is that 1% more LOC must 11/*M:012
12 * Lguest is meant to be simple: my rule of thumb is that 1% more LOC must
10 * gain at least 1% more performance. Since neither LOC nor performance can be 13 * gain at least 1% more performance. Since neither LOC nor performance can be
11 * measured beforehand, it generally means implementing a feature then deciding 14 * measured beforehand, it generally means implementing a feature then deciding
12 * if it's worth it. And once it's implemented, who can say no? 15 * if it's worth it. And once it's implemented, who can say no?
@@ -31,11 +34,14 @@
31 * Host (which is actually really easy). 34 * Host (which is actually really easy).
32 * 35 *
33 * Two questions remain. Would the performance gain outweigh the complexity? 36 * Two questions remain. Would the performance gain outweigh the complexity?
34 * And who would write the verse documenting it? :*/ 37 * And who would write the verse documenting it?
38:*/
35 39
36/*M:011 Lguest64 handles NMI. This gave me NMI envy (until I looked at their 40/*M:011
41 * Lguest64 handles NMI. This gave me NMI envy (until I looked at their
37 * code). It's worth doing though, since it would let us use oprofile in the 42 * code). It's worth doing though, since it would let us use oprofile in the
38 * Host when a Guest is running. :*/ 43 * Host when a Guest is running.
44:*/
39 45
40/*S:100 46/*S:100
41 * Welcome to the Switcher itself! 47 * Welcome to the Switcher itself!
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 6e149f4a1fff..a0f68386c12f 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -378,6 +378,17 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
378 dev->ofdev.dev.bus = &macio_bus_type; 378 dev->ofdev.dev.bus = &macio_bus_type;
379 dev->ofdev.dev.release = macio_release_dev; 379 dev->ofdev.dev.release = macio_release_dev;
380 380
381#ifdef CONFIG_PCI
382 /* Set the DMA ops to the ones from the PCI device, this could be
383 * fishy if we didn't know that on PowerMac it's always direct ops
384 * or iommu ops that will work fine
385 */
386 dev->ofdev.dev.archdata.dma_ops =
387 chip->lbus.pdev->dev.archdata.dma_ops;
388 dev->ofdev.dev.archdata.dma_data =
389 chip->lbus.pdev->dev.archdata.dma_data;
390#endif /* CONFIG_PCI */
391
381#ifdef DEBUG 392#ifdef DEBUG
382 printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", 393 printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
383 dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj); 394 dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9933eb861c71..ed1038164019 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -776,7 +776,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
776 * But don't wait if split was due to the io size restriction 776 * But don't wait if split was due to the io size restriction
777 */ 777 */
778 if (unlikely(out_of_pages)) 778 if (unlikely(out_of_pages))
779 congestion_wait(WRITE, HZ/100); 779 congestion_wait(BLK_RW_ASYNC, HZ/100);
780 780
781 /* 781 /*
782 * With async crypto it is unsafe to share the crypto context 782 * With async crypto it is unsafe to share the crypto context
@@ -1318,7 +1318,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
1318{ 1318{
1319 struct crypt_config *cc = ti->private; 1319 struct crypt_config *cc = ti->private;
1320 1320
1321 return fn(ti, cc->dev, cc->start, data); 1321 return fn(ti, cc->dev, cc->start, ti->len, data);
1322} 1322}
1323 1323
1324static struct target_type crypt_target = { 1324static struct target_type crypt_target = {
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 4e5b843cd4d7..ebe7381f47c8 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -324,12 +324,12 @@ static int delay_iterate_devices(struct dm_target *ti,
324 struct delay_c *dc = ti->private; 324 struct delay_c *dc = ti->private;
325 int ret = 0; 325 int ret = 0;
326 326
327 ret = fn(ti, dc->dev_read, dc->start_read, data); 327 ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
328 if (ret) 328 if (ret)
329 goto out; 329 goto out;
330 330
331 if (dc->dev_write) 331 if (dc->dev_write)
332 ret = fn(ti, dc->dev_write, dc->start_write, data); 332 ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);
333 333
334out: 334out:
335 return ret; 335 return ret;
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index c3ae51584b12..3710ff88fc10 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
195 struct dm_exception_store **store) 195 struct dm_exception_store **store)
196{ 196{
197 int r = 0; 197 int r = 0;
198 struct dm_exception_store_type *type; 198 struct dm_exception_store_type *type = NULL;
199 struct dm_exception_store *tmp_store; 199 struct dm_exception_store *tmp_store;
200 char persistent; 200 char persistent;
201 201
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
211 } 211 }
212 212
213 persistent = toupper(*argv[1]); 213 persistent = toupper(*argv[1]);
214 if (persistent != 'P' && persistent != 'N') { 214 if (persistent == 'P')
215 type = get_type("P");
216 else if (persistent == 'N')
217 type = get_type("N");
218 else {
215 ti->error = "Persistent flag is not P or N"; 219 ti->error = "Persistent flag is not P or N";
216 return -EINVAL; 220 return -EINVAL;
217 } 221 }
218 222
219 type = get_type(&persistent);
220 if (!type) { 223 if (!type) {
221 ti->error = "Exception store type not recognised"; 224 ti->error = "Exception store type not recognised";
222 r = -EINVAL; 225 r = -EINVAL;
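The dm-exception-store change above appears to exist because get_type() expects a NUL-terminated type name: passing the address of a lone char, as the removed get_type(&persistent) call did, hands it a buffer with no terminator. A small illustration of the difference (example values only, not code from the patch):

	char persistent = 'P';		/* one byte on the stack, no NUL after it */
	type = get_type(&persistent);	/* old call: reads past 'persistent' looking
					 * for the string terminator */
	type = get_type("P");		/* new call: "P" is the two bytes { 'P', '\0' } */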
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 9184b6deb868..82f7d6e6b1ea 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -139,7 +139,7 @@ static int linear_iterate_devices(struct dm_target *ti,
139{ 139{
140 struct linear_c *lc = ti->private; 140 struct linear_c *lc = ti->private;
141 141
142 return fn(ti, lc->dev, lc->start, data); 142 return fn(ti, lc->dev, lc->start, ti->len, data);
143} 143}
144 144
145static struct target_type linear_target = { 145static struct target_type linear_target = {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c70604a20897..6f0d90d4a541 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1453,7 +1453,7 @@ static int multipath_iterate_devices(struct dm_target *ti,
1453 1453
1454 list_for_each_entry(pg, &m->priority_groups, list) { 1454 list_for_each_entry(pg, &m->priority_groups, list) {
1455 list_for_each_entry(p, &pg->pgpaths, list) { 1455 list_for_each_entry(p, &pg->pgpaths, list) {
1456 ret = fn(ti, p->path.dev, ti->begin, data); 1456 ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1457 if (ret) 1457 if (ret)
1458 goto out; 1458 goto out;
1459 } 1459 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ce8868c768cc..9726577cde49 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -638,6 +638,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
638 spin_lock_irq(&ms->lock); 638 spin_lock_irq(&ms->lock);
639 bio_list_merge(&ms->writes, &requeue); 639 bio_list_merge(&ms->writes, &requeue);
640 spin_unlock_irq(&ms->lock); 640 spin_unlock_irq(&ms->lock);
641 delayed_wake(ms);
641 } 642 }
642 643
643 /* 644 /*
@@ -1292,7 +1293,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
1292 1293
1293 for (i = 0; !ret && i < ms->nr_mirrors; i++) 1294 for (i = 0; !ret && i < ms->nr_mirrors; i++)
1294 ret = fn(ti, ms->mirror[i].dev, 1295 ret = fn(ti, ms->mirror[i].dev,
1295 ms->mirror[i].offset, data); 1296 ms->mirror[i].offset, ti->len, data);
1296 1297
1297 return ret; 1298 return ret;
1298} 1299}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b240e85ae39a..4e0e5937e42a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -320,10 +320,11 @@ static int stripe_iterate_devices(struct dm_target *ti,
320 int ret = 0; 320 int ret = 0;
321 unsigned i = 0; 321 unsigned i = 0;
322 322
323 do 323 do {
324 ret = fn(ti, sc->stripe[i].dev, 324 ret = fn(ti, sc->stripe[i].dev,
325 sc->stripe[i].physical_start, data); 325 sc->stripe[i].physical_start,
326 while (!ret && ++i < sc->stripes); 326 sc->stripe_width, data);
327 } while (!ret && ++i < sc->stripes);
327 328
328 return ret; 329 return ret;
329} 330}
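The dm-crypt, dm-delay, dm-linear, dm-mpath, dm-raid1 and dm-stripe hunks above all make the same change: the iterate_devices callback is now given the length of the mapped range as well as its start. A minimal sketch of a callback and its caller under the new five-argument convention (the typedef and the example_c fields are assumptions for illustration, not code from the patch):

	typedef int (*iterate_devices_callout_fn)(struct dm_target *ti,
						  struct dm_dev *dev,
						  sector_t start, sector_t len,
						  void *data);

	static int example_iterate_devices(struct dm_target *ti,
					   iterate_devices_callout_fn fn,
					   void *data)
	{
		struct example_c *ec = ti->private;

		/* Report the exact span this target maps onto ec->dev. */
		return fn(ti, ec->dev, ec->start, ti->len, data);
	}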
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4899ebe767c8..d952b3441913 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -346,7 +346,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
346 * If possible, this checks an area of a destination device is valid. 346 * If possible, this checks an area of a destination device is valid.
347 */ 347 */
348static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, 348static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
349 sector_t start, void *data) 349 sector_t start, sector_t len, void *data)
350{ 350{
351 struct queue_limits *limits = data; 351 struct queue_limits *limits = data;
352 struct block_device *bdev = dev->bdev; 352 struct block_device *bdev = dev->bdev;
@@ -359,7 +359,7 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
359 if (!dev_size) 359 if (!dev_size)
360 return 1; 360 return 1;
361 361
362 if ((start >= dev_size) || (start + ti->len > dev_size)) { 362 if ((start >= dev_size) || (start + len > dev_size)) {
363 DMWARN("%s: %s too small for target", 363 DMWARN("%s: %s too small for target",
364 dm_device_name(ti->table->md), bdevname(bdev, b)); 364 dm_device_name(ti->table->md), bdevname(bdev, b));
365 return 0; 365 return 0;
@@ -377,11 +377,11 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
377 return 0; 377 return 0;
378 } 378 }
379 379
380 if (ti->len & (logical_block_size_sectors - 1)) { 380 if (len & (logical_block_size_sectors - 1)) {
381 DMWARN("%s: len=%llu not aligned to h/w " 381 DMWARN("%s: len=%llu not aligned to h/w "
382 "logical block size %hu of %s", 382 "logical block size %hu of %s",
383 dm_device_name(ti->table->md), 383 dm_device_name(ti->table->md),
384 (unsigned long long)ti->len, 384 (unsigned long long)len,
385 limits->logical_block_size, bdevname(bdev, b)); 385 limits->logical_block_size, bdevname(bdev, b));
386 return 0; 386 return 0;
387 } 387 }
@@ -482,7 +482,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
482#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) 482#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
483 483
484int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, 484int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
485 sector_t start, void *data) 485 sector_t start, sector_t len, void *data)
486{ 486{
487 struct queue_limits *limits = data; 487 struct queue_limits *limits = data;
488 struct block_device *bdev = dev->bdev; 488 struct block_device *bdev = dev->bdev;
@@ -495,7 +495,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
495 return 0; 495 return 0;
496 } 496 }
497 497
498 if (blk_stack_limits(limits, &q->limits, start) < 0) 498 if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
499 DMWARN("%s: target device %s is misaligned", 499 DMWARN("%s: target device %s is misaligned",
500 dm_device_name(ti->table->md), bdevname(bdev, b)); 500 dm_device_name(ti->table->md), bdevname(bdev, b));
501 501
@@ -830,11 +830,6 @@ unsigned dm_table_get_type(struct dm_table *t)
830 return t->type; 830 return t->type;
831} 831}
832 832
833bool dm_table_bio_based(struct dm_table *t)
834{
835 return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
836}
837
838bool dm_table_request_based(struct dm_table *t) 833bool dm_table_request_based(struct dm_table *t)
839{ 834{
840 return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; 835 return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
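One detail worth spelling out from the dm_set_device_limits() hunk above: dm tracks the start offset in 512-byte sectors, while the value handed to blk_stack_limits() is evidently expected in bytes, hence the start << 9 conversion (1 sector = 2^9 = 512 bytes). For example, a target that begins 2048 sectors into its underlying device now reports an alignment offset of 2048 << 9 = 1,048,576 bytes, i.e. 1 MiB.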
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c6d4ee8921d..8a311ea0d441 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1017,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
1017 clone->bi_flags |= 1 << BIO_CLONED; 1017 clone->bi_flags |= 1 << BIO_CLONED;
1018 1018
1019 if (bio_integrity(bio)) { 1019 if (bio_integrity(bio)) {
1020 bio_integrity_clone(clone, bio, GFP_NOIO); 1020 bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1021 bio_integrity_trim(clone, 1021 bio_integrity_trim(clone,
1022 bio_sector_offset(bio, idx, offset), len); 1022 bio_sector_offset(bio, idx, offset), len);
1023 } 1023 }
@@ -1045,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
1045 clone->bi_flags &= ~(1 << BIO_SEG_VALID); 1045 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1046 1046
1047 if (bio_integrity(bio)) { 1047 if (bio_integrity(bio)) {
1048 bio_integrity_clone(clone, bio, GFP_NOIO); 1048 bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1049 1049
1050 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1050 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1051 bio_integrity_trim(clone, 1051 bio_integrity_trim(clone,
@@ -2203,16 +2203,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
2203 goto out; 2203 goto out;
2204 } 2204 }
2205 2205
2206 /*
2207 * It is enought that blk_queue_ordered() is called only once when
2208 * the first bio-based table is bound.
2209 *
2210 * This setting should be moved to alloc_dev() when request-based dm
2211 * supports barrier.
2212 */
2213 if (!md->map && dm_table_bio_based(table))
2214 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
2215
2216 __unbind(md); 2206 __unbind(md);
2217 r = __bind(md, table, &limits); 2207 r = __bind(md, table, &limits);
2218 2208
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 23278ae80f08..a7663eba17e2 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
61int dm_table_any_busy_target(struct dm_table *t); 61int dm_table_any_busy_target(struct dm_table *t);
62int dm_table_set_type(struct dm_table *t); 62int dm_table_set_type(struct dm_table *t);
63unsigned dm_table_get_type(struct dm_table *t); 63unsigned dm_table_get_type(struct dm_table *t);
64bool dm_table_bio_based(struct dm_table *t);
65bool dm_table_request_based(struct dm_table *t); 64bool dm_table_request_based(struct dm_table *t);
66int dm_table_alloc_md_mempools(struct dm_table *t); 65int dm_table_alloc_md_mempools(struct dm_table *t);
67void dm_table_free_md_mempools(struct dm_table *t); 66void dm_table_free_md_mempools(struct dm_table *t);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 15c8b7b25a9b..5fe39c2a3d2b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
166 rdev->sectors = sectors * mddev->chunk_sectors; 166 rdev->sectors = sectors * mddev->chunk_sectors;
167 } 167 }
168 168
169 blk_queue_stack_limits(mddev->queue, 169 disk_stack_limits(mddev->gendisk, rdev->bdev,
170 rdev->bdev->bd_disk->queue); 170 rdev->data_offset << 9);
171 /* as we don't honour merge_bvec_fn, we must never risk 171 /* as we don't honour merge_bvec_fn, we must never risk
172 * violating it, so limit ->max_sector to one PAGE, as 172 * violating it, so limit ->max_sector to one PAGE, as
173 * a one page request is never in violation. 173 * a one page request is never in violation.
@@ -220,6 +220,7 @@ static int linear_run (mddev_t *mddev)
220 mddev->queue->unplug_fn = linear_unplug; 220 mddev->queue->unplug_fn = linear_unplug;
221 mddev->queue->backing_dev_info.congested_fn = linear_congested; 221 mddev->queue->backing_dev_info.congested_fn = linear_congested;
222 mddev->queue->backing_dev_info.congested_data = mddev; 222 mddev->queue->backing_dev_info.congested_data = mddev;
223 md_integrity_register(mddev);
223 return 0; 224 return 0;
224} 225}
225 226
@@ -256,6 +257,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
256 rcu_assign_pointer(mddev->private, newconf); 257 rcu_assign_pointer(mddev->private, newconf);
257 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 258 md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
258 set_capacity(mddev->gendisk, mddev->array_sectors); 259 set_capacity(mddev->gendisk, mddev->array_sectors);
260 revalidate_disk(mddev->gendisk);
259 call_rcu(&oldconf->rcu, free_conf); 261 call_rcu(&oldconf->rcu, free_conf);
260 return 0; 262 return 0;
261} 263}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09be637d52cb..9dd872000cec 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -359,6 +359,7 @@ static mddev_t * mddev_find(dev_t unit)
359 else 359 else
360 new->md_minor = MINOR(unit) >> MdpMinorShift; 360 new->md_minor = MINOR(unit) >> MdpMinorShift;
361 361
362 mutex_init(&new->open_mutex);
362 mutex_init(&new->reconfig_mutex); 363 mutex_init(&new->reconfig_mutex);
363 INIT_LIST_HEAD(&new->disks); 364 INIT_LIST_HEAD(&new->disks);
364 INIT_LIST_HEAD(&new->all_mddevs); 365 INIT_LIST_HEAD(&new->all_mddevs);
@@ -1308,7 +1309,12 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1308 } 1309 }
1309 if (mddev->level != LEVEL_MULTIPATH) { 1310 if (mddev->level != LEVEL_MULTIPATH) {
1310 int role; 1311 int role;
1311 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1312 if (rdev->desc_nr < 0 ||
1313 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1314 role = 0xffff;
1315 rdev->desc_nr = -1;
1316 } else
1317 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1312 switch(role) { 1318 switch(role) {
1313 case 0xffff: /* spare */ 1319 case 0xffff: /* spare */
1314 break; 1320 break;
@@ -1394,8 +1400,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1394 if (rdev2->desc_nr+1 > max_dev) 1400 if (rdev2->desc_nr+1 > max_dev)
1395 max_dev = rdev2->desc_nr+1; 1401 max_dev = rdev2->desc_nr+1;
1396 1402
1397 if (max_dev > le32_to_cpu(sb->max_dev)) 1403 if (max_dev > le32_to_cpu(sb->max_dev)) {
1404 int bmask;
1398 sb->max_dev = cpu_to_le32(max_dev); 1405 sb->max_dev = cpu_to_le32(max_dev);
1406 rdev->sb_size = max_dev * 2 + 256;
1407 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1408 if (rdev->sb_size & bmask)
1409 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1410 }
1399 for (i=0; i<max_dev;i++) 1411 for (i=0; i<max_dev;i++)
1400 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1412 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1401 1413
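A worked example of the new sb_size rounding in super_1_sync(): with max_dev = 384 the superblock needs 384 * 2 + 256 = 1024 bytes; on a device with 4096-byte logical blocks, bmask = 4095, and since 1024 & 4095 is non-zero the size is rounded up to (1024 | 4095) + 1 = 4096 bytes. Sizes that are already a multiple of the logical block size leave the low bits clear and are deliberately left untouched, so the superblock write always covers a whole number of logical blocks.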
@@ -1487,37 +1499,76 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1487 1499
1488static LIST_HEAD(pending_raid_disks); 1500static LIST_HEAD(pending_raid_disks);
1489 1501
1490static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev) 1502/*
1503 * Try to register data integrity profile for an mddev
1504 *
1505 * This is called when an array is started and after a disk has been kicked
1506 * from the array. It only succeeds if all working and active component devices
1507 * are integrity capable with matching profiles.
1508 */
1509int md_integrity_register(mddev_t *mddev)
1510{
1511 mdk_rdev_t *rdev, *reference = NULL;
1512
1513 if (list_empty(&mddev->disks))
1514 return 0; /* nothing to do */
1515 if (blk_get_integrity(mddev->gendisk))
1516 return 0; /* already registered */
1517 list_for_each_entry(rdev, &mddev->disks, same_set) {
1518 /* skip spares and non-functional disks */
1519 if (test_bit(Faulty, &rdev->flags))
1520 continue;
1521 if (rdev->raid_disk < 0)
1522 continue;
1523 /*
1524 * If at least one rdev is not integrity capable, we can not
1525 * enable data integrity for the md device.
1526 */
1527 if (!bdev_get_integrity(rdev->bdev))
1528 return -EINVAL;
1529 if (!reference) {
1530 /* Use the first rdev as the reference */
1531 reference = rdev;
1532 continue;
1533 }
1534 /* does this rdev's profile match the reference profile? */
1535 if (blk_integrity_compare(reference->bdev->bd_disk,
1536 rdev->bdev->bd_disk) < 0)
1537 return -EINVAL;
1538 }
1539 /*
1540 * All component devices are integrity capable and have matching
1541 * profiles, register the common profile for the md device.
1542 */
1543 if (blk_integrity_register(mddev->gendisk,
1544 bdev_get_integrity(reference->bdev)) != 0) {
1545 printk(KERN_ERR "md: failed to register integrity for %s\n",
1546 mdname(mddev));
1547 return -EINVAL;
1548 }
1549 printk(KERN_NOTICE "md: data integrity on %s enabled\n",
1550 mdname(mddev));
1551 return 0;
1552}
1553EXPORT_SYMBOL(md_integrity_register);
1554
1555/* Disable data integrity if non-capable/non-matching disk is being added */
1556void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
1491{ 1557{
1492 struct mdk_personality *pers = mddev->pers;
1493 struct gendisk *disk = mddev->gendisk;
1494 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 1558 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
1495 struct blk_integrity *bi_mddev = blk_get_integrity(disk); 1559 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
1496 1560
1497 /* Data integrity passthrough not supported on RAID 4, 5 and 6 */ 1561 if (!bi_mddev) /* nothing to do */
1498 if (pers && pers->level >= 4 && pers->level <= 6)
1499 return; 1562 return;
1500 1563 if (rdev->raid_disk < 0) /* skip spares */
1501 /* If rdev is integrity capable, register profile for mddev */
1502 if (!bi_mddev && bi_rdev) {
1503 if (blk_integrity_register(disk, bi_rdev))
1504 printk(KERN_ERR "%s: %s Could not register integrity!\n",
1505 __func__, disk->disk_name);
1506 else
1507 printk(KERN_NOTICE "Enabling data integrity on %s\n",
1508 disk->disk_name);
1509 return; 1564 return;
1510 } 1565 if (bi_rdev && blk_integrity_compare(mddev->gendisk,
1511 1566 rdev->bdev->bd_disk) >= 0)
1512 /* Check that mddev and rdev have matching profiles */ 1567 return;
1513 if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) { 1568 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
1514 printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__, 1569 blk_integrity_unregister(mddev->gendisk);
1515 disk->disk_name, rdev->bdev->bd_disk->disk_name);
1516 printk(KERN_NOTICE "Disabling data integrity on %s\n",
1517 disk->disk_name);
1518 blk_integrity_unregister(disk);
1519 }
1520} 1570}
1571EXPORT_SYMBOL(md_integrity_add_rdev);
1521 1572
1522static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1573static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1523{ 1574{
@@ -1591,7 +1642,6 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1591 /* May as well allow recovery to be retried once */ 1642 /* May as well allow recovery to be retried once */
1592 mddev->recovery_disabled = 0; 1643 mddev->recovery_disabled = 0;
1593 1644
1594 md_integrity_check(rdev, mddev);
1595 return 0; 1645 return 0;
1596 1646
1597 fail: 1647 fail:
@@ -1756,9 +1806,10 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1756 __u8 *uuid; 1806 __u8 *uuid;
1757 1807
1758 uuid = sb->set_uuid; 1808 uuid = sb->set_uuid;
1759 printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" 1809 printk(KERN_INFO
1760 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" 1810 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
1761 KERN_INFO "md: Name: \"%s\" CT:%llu\n", 1811 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
1812 "md: Name: \"%s\" CT:%llu\n",
1762 le32_to_cpu(sb->major_version), 1813 le32_to_cpu(sb->major_version),
1763 le32_to_cpu(sb->feature_map), 1814 le32_to_cpu(sb->feature_map),
1764 uuid[0], uuid[1], uuid[2], uuid[3], 1815 uuid[0], uuid[1], uuid[2], uuid[3],
@@ -1770,12 +1821,13 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1770 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 1821 & MD_SUPERBLOCK_1_TIME_SEC_MASK);
1771 1822
1772 uuid = sb->device_uuid; 1823 uuid = sb->device_uuid;
1773 printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 1824 printk(KERN_INFO
1825 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
1774 " RO:%llu\n" 1826 " RO:%llu\n"
1775 KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" 1827 "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
1776 ":%02x%02x%02x%02x%02x%02x\n" 1828 ":%02x%02x%02x%02x%02x%02x\n"
1777 KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 1829 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
1778 KERN_INFO "md: (MaxDev:%u) \n", 1830 "md: (MaxDev:%u) \n",
1779 le32_to_cpu(sb->level), 1831 le32_to_cpu(sb->level),
1780 (unsigned long long)le64_to_cpu(sb->size), 1832 (unsigned long long)le64_to_cpu(sb->size),
1781 le32_to_cpu(sb->raid_disks), 1833 le32_to_cpu(sb->raid_disks),
@@ -1923,17 +1975,14 @@ repeat:
1923 /* otherwise we have to go forward and ... */ 1975 /* otherwise we have to go forward and ... */
1924 mddev->events ++; 1976 mddev->events ++;
1925 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ 1977 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1926 /* .. if the array isn't clean, insist on an odd 'events' */ 1978 /* .. if the array isn't clean, an 'even' event must also go
1927 if ((mddev->events&1)==0) { 1979 * to spares. */
1928 mddev->events++; 1980 if ((mddev->events&1)==0)
1929 nospares = 0; 1981 nospares = 0;
1930 }
1931 } else { 1982 } else {
1932 /* otherwise insist on an even 'events' (for clean states) */ 1983 /* otherwise an 'odd' event must go to spares */
1933 if ((mddev->events&1)) { 1984 if ((mddev->events&1))
1934 mddev->events++;
1935 nospares = 0; 1985 nospares = 0;
1936 }
1937 } 1986 }
1938 } 1987 }
1939 1988
@@ -2655,6 +2704,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2655 ssize_t rv = len; 2704 ssize_t rv = len;
2656 struct mdk_personality *pers; 2705 struct mdk_personality *pers;
2657 void *priv; 2706 void *priv;
2707 mdk_rdev_t *rdev;
2658 2708
2659 if (mddev->pers == NULL) { 2709 if (mddev->pers == NULL) {
2660 if (len == 0) 2710 if (len == 0)
@@ -2734,6 +2784,12 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2734 mddev_suspend(mddev); 2784 mddev_suspend(mddev);
2735 mddev->pers->stop(mddev); 2785 mddev->pers->stop(mddev);
2736 module_put(mddev->pers->owner); 2786 module_put(mddev->pers->owner);
2787 /* Invalidate devices that are now superfluous */
2788 list_for_each_entry(rdev, &mddev->disks, same_set)
2789 if (rdev->raid_disk >= mddev->raid_disks) {
2790 rdev->raid_disk = -1;
2791 clear_bit(In_sync, &rdev->flags);
2792 }
2737 mddev->pers = pers; 2793 mddev->pers = pers;
2738 mddev->private = priv; 2794 mddev->private = priv;
2739 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2795 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
@@ -3543,6 +3599,7 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3543 if (max < mddev->resync_min) 3599 if (max < mddev->resync_min)
3544 return -EINVAL; 3600 return -EINVAL;
3545 if (max < mddev->resync_max && 3601 if (max < mddev->resync_max &&
3602 mddev->ro == 0 &&
3546 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3603 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3547 return -EBUSY; 3604 return -EBUSY;
3548 3605
@@ -3573,7 +3630,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3573 char *e; 3630 char *e;
3574 unsigned long long new = simple_strtoull(buf, &e, 10); 3631 unsigned long long new = simple_strtoull(buf, &e, 10);
3575 3632
3576 if (mddev->pers->quiesce == NULL) 3633 if (mddev->pers == NULL ||
3634 mddev->pers->quiesce == NULL)
3577 return -EINVAL; 3635 return -EINVAL;
3578 if (buf == e || (*e && *e != '\n')) 3636 if (buf == e || (*e && *e != '\n'))
3579 return -EINVAL; 3637 return -EINVAL;
@@ -3601,7 +3659,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3601 char *e; 3659 char *e;
3602 unsigned long long new = simple_strtoull(buf, &e, 10); 3660 unsigned long long new = simple_strtoull(buf, &e, 10);
3603 3661
3604 if (mddev->pers->quiesce == NULL) 3662 if (mddev->pers == NULL ||
3663 mddev->pers->quiesce == NULL)
3605 return -EINVAL; 3664 return -EINVAL;
3606 if (buf == e || (*e && *e != '\n')) 3665 if (buf == e || (*e && *e != '\n'))
3607 return -EINVAL; 3666 return -EINVAL;
@@ -3681,17 +3740,8 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
3681 3740
3682 mddev->array_sectors = sectors; 3741 mddev->array_sectors = sectors;
3683 set_capacity(mddev->gendisk, mddev->array_sectors); 3742 set_capacity(mddev->gendisk, mddev->array_sectors);
3684 if (mddev->pers) { 3743 if (mddev->pers)
3685 struct block_device *bdev = bdget_disk(mddev->gendisk, 0); 3744 revalidate_disk(mddev->gendisk);
3686
3687 if (bdev) {
3688 mutex_lock(&bdev->bd_inode->i_mutex);
3689 i_size_write(bdev->bd_inode,
3690 (loff_t)mddev->array_sectors << 9);
3691 mutex_unlock(&bdev->bd_inode->i_mutex);
3692 bdput(bdev);
3693 }
3694 }
3695 3745
3696 return len; 3746 return len;
3697} 3747}
@@ -3844,11 +3894,9 @@ static int md_alloc(dev_t dev, char *name)
3844 flush_scheduled_work(); 3894 flush_scheduled_work();
3845 3895
3846 mutex_lock(&disks_mutex); 3896 mutex_lock(&disks_mutex);
3847 if (mddev->gendisk) { 3897 error = -EEXIST;
3848 mutex_unlock(&disks_mutex); 3898 if (mddev->gendisk)
3849 mddev_put(mddev); 3899 goto abort;
3850 return -EEXIST;
3851 }
3852 3900
3853 if (name) { 3901 if (name) {
3854 /* Need to ensure that 'name' is not a duplicate. 3902 /* Need to ensure that 'name' is not a duplicate.
@@ -3860,17 +3908,15 @@ static int md_alloc(dev_t dev, char *name)
3860 if (mddev2->gendisk && 3908 if (mddev2->gendisk &&
3861 strcmp(mddev2->gendisk->disk_name, name) == 0) { 3909 strcmp(mddev2->gendisk->disk_name, name) == 0) {
3862 spin_unlock(&all_mddevs_lock); 3910 spin_unlock(&all_mddevs_lock);
3863 return -EEXIST; 3911 goto abort;
3864 } 3912 }
3865 spin_unlock(&all_mddevs_lock); 3913 spin_unlock(&all_mddevs_lock);
3866 } 3914 }
3867 3915
3916 error = -ENOMEM;
3868 mddev->queue = blk_alloc_queue(GFP_KERNEL); 3917 mddev->queue = blk_alloc_queue(GFP_KERNEL);
3869 if (!mddev->queue) { 3918 if (!mddev->queue)
3870 mutex_unlock(&disks_mutex); 3919 goto abort;
3871 mddev_put(mddev);
3872 return -ENOMEM;
3873 }
3874 mddev->queue->queuedata = mddev; 3920 mddev->queue->queuedata = mddev;
3875 3921
3876 /* Can be unlocked because the queue is new: no concurrency */ 3922 /* Can be unlocked because the queue is new: no concurrency */
@@ -3880,11 +3926,9 @@ static int md_alloc(dev_t dev, char *name)
3880 3926
3881 disk = alloc_disk(1 << shift); 3927 disk = alloc_disk(1 << shift);
3882 if (!disk) { 3928 if (!disk) {
3883 mutex_unlock(&disks_mutex);
3884 blk_cleanup_queue(mddev->queue); 3929 blk_cleanup_queue(mddev->queue);
3885 mddev->queue = NULL; 3930 mddev->queue = NULL;
3886 mddev_put(mddev); 3931 goto abort;
3887 return -ENOMEM;
3888 } 3932 }
3889 disk->major = MAJOR(mddev->unit); 3933 disk->major = MAJOR(mddev->unit);
3890 disk->first_minor = unit << shift; 3934 disk->first_minor = unit << shift;
@@ -3906,16 +3950,22 @@ static int md_alloc(dev_t dev, char *name)
3906 mddev->gendisk = disk; 3950 mddev->gendisk = disk;
3907 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 3951 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3908 &disk_to_dev(disk)->kobj, "%s", "md"); 3952 &disk_to_dev(disk)->kobj, "%s", "md");
3909 mutex_unlock(&disks_mutex); 3953 if (error) {
3910 if (error) 3954 /* This isn't possible, but as kobject_init_and_add is marked
3955 * __must_check, we must do something with the result
3956 */
3911 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3957 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3912 disk->disk_name); 3958 disk->disk_name);
3913 else { 3959 error = 0;
3960 }
3961 abort:
3962 mutex_unlock(&disks_mutex);
3963 if (!error) {
3914 kobject_uevent(&mddev->kobj, KOBJ_ADD); 3964 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3915 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); 3965 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
3916 } 3966 }
3917 mddev_put(mddev); 3967 mddev_put(mddev);
3918 return 0; 3968 return error;
3919} 3969}
3920 3970
3921static struct kobject *md_probe(dev_t dev, int *part, void *data) 3971static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -4044,10 +4094,6 @@ static int do_md_run(mddev_t * mddev)
4044 } 4094 }
4045 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4095 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4046 4096
4047 if (pers->level >= 4 && pers->level <= 6)
4048 /* Cannot support integrity (yet) */
4049 blk_integrity_unregister(mddev->gendisk);
4050
4051 if (mddev->reshape_position != MaxSector && 4097 if (mddev->reshape_position != MaxSector &&
4052 pers->start_reshape == NULL) { 4098 pers->start_reshape == NULL) {
4053 /* This personality cannot handle reshaping... */ 4099 /* This personality cannot handle reshaping... */
@@ -4185,6 +4231,7 @@ static int do_md_run(mddev_t * mddev)
4185 md_wakeup_thread(mddev->thread); 4231 md_wakeup_thread(mddev->thread);
4186 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4232 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4187 4233
4234 revalidate_disk(mddev->gendisk);
4188 mddev->changed = 1; 4235 mddev->changed = 1;
4189 md_new_event(mddev); 4236 md_new_event(mddev);
4190 sysfs_notify_dirent(mddev->sysfs_state); 4237 sysfs_notify_dirent(mddev->sysfs_state);
@@ -4256,12 +4303,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4256 struct gendisk *disk = mddev->gendisk; 4303 struct gendisk *disk = mddev->gendisk;
4257 mdk_rdev_t *rdev; 4304 mdk_rdev_t *rdev;
4258 4305
4306 mutex_lock(&mddev->open_mutex);
4259 if (atomic_read(&mddev->openers) > is_open) { 4307 if (atomic_read(&mddev->openers) > is_open) {
4260 printk("md: %s still in use.\n",mdname(mddev)); 4308 printk("md: %s still in use.\n",mdname(mddev));
4261 return -EBUSY; 4309 err = -EBUSY;
4262 } 4310 } else if (mddev->pers) {
4263
4264 if (mddev->pers) {
4265 4311
4266 if (mddev->sync_thread) { 4312 if (mddev->sync_thread) {
4267 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4313 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4318,8 +4364,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4318 if (mode == 1) 4364 if (mode == 1)
4319 set_disk_ro(disk, 1); 4365 set_disk_ro(disk, 1);
4320 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4366 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4367 err = 0;
4321 } 4368 }
4322 4369out:
4370 mutex_unlock(&mddev->open_mutex);
4371 if (err)
4372 return err;
4323 /* 4373 /*
4324 * Free resources if final stop 4374 * Free resources if final stop
4325 */ 4375 */
@@ -4385,7 +4435,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4385 blk_integrity_unregister(disk); 4435 blk_integrity_unregister(disk);
4386 md_new_event(mddev); 4436 md_new_event(mddev);
4387 sysfs_notify_dirent(mddev->sysfs_state); 4437 sysfs_notify_dirent(mddev->sysfs_state);
4388out:
4389 return err; 4438 return err;
4390} 4439}
4391 4440
@@ -5083,18 +5132,8 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
5083 return -ENOSPC; 5132 return -ENOSPC;
5084 } 5133 }
5085 rv = mddev->pers->resize(mddev, num_sectors); 5134 rv = mddev->pers->resize(mddev, num_sectors);
5086 if (!rv) { 5135 if (!rv)
5087 struct block_device *bdev; 5136 revalidate_disk(mddev->gendisk);
5088
5089 bdev = bdget_disk(mddev->gendisk, 0);
5090 if (bdev) {
5091 mutex_lock(&bdev->bd_inode->i_mutex);
5092 i_size_write(bdev->bd_inode,
5093 (loff_t)mddev->array_sectors << 9);
5094 mutex_unlock(&bdev->bd_inode->i_mutex);
5095 bdput(bdev);
5096 }
5097 }
5098 return rv; 5137 return rv;
5099} 5138}
5100 5139
@@ -5480,12 +5519,12 @@ static int md_open(struct block_device *bdev, fmode_t mode)
5480 } 5519 }
5481 BUG_ON(mddev != bdev->bd_disk->private_data); 5520 BUG_ON(mddev != bdev->bd_disk->private_data);
5482 5521
5483 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) 5522 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
5484 goto out; 5523 goto out;
5485 5524
5486 err = 0; 5525 err = 0;
5487 atomic_inc(&mddev->openers); 5526 atomic_inc(&mddev->openers);
5488 mddev_unlock(mddev); 5527 mutex_unlock(&mddev->open_mutex);
5489 5528
5490 check_disk_change(bdev); 5529 check_disk_change(bdev);
5491 out: 5530 out:
@@ -6334,10 +6373,16 @@ void md_do_sync(mddev_t *mddev)
6334 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6373 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6335 } 6374 }
6336 6375
6337 if (j >= mddev->resync_max) 6376 while (j >= mddev->resync_max && !kthread_should_stop()) {
6338 wait_event(mddev->recovery_wait, 6377 /* As this condition is controlled by user-space,
6339 mddev->resync_max > j 6378 * we can block indefinitely, so use '_interruptible'
6340 || kthread_should_stop()); 6379 * to avoid triggering warnings.
6380 */
6381 flush_signals(current); /* just in case */
6382 wait_event_interruptible(mddev->recovery_wait,
6383 mddev->resync_max > j
6384 || kthread_should_stop());
6385 }
6341 6386
6342 if (kthread_should_stop()) 6387 if (kthread_should_stop())
6343 goto interrupted; 6388 goto interrupted;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 9430a110db93..f8fc188bc762 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -223,6 +223,16 @@ struct mddev_s
223 * so we don't loop trying */ 223 * so we don't loop trying */
224 224
225 int in_sync; /* know to not need resync */ 225 int in_sync; /* know to not need resync */
226 /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
227 * that we are never stopping an array while it is open.
228 * 'reconfig_mutex' protects all other reconfiguration.
229 * These locks are separate due to conflicting interactions
230 * with bdev->bd_mutex.
231 * Lock ordering is:
232 * reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
233 * bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open
234 */
235 struct mutex open_mutex;
226 struct mutex reconfig_mutex; 236 struct mutex reconfig_mutex;
227 atomic_t active; /* general refcount */ 237 atomic_t active; /* general refcount */
228 atomic_t openers; /* number of active opens */ 238 atomic_t openers; /* number of active opens */
@@ -431,5 +441,7 @@ extern int md_allow_write(mddev_t *mddev);
431extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); 441extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
432extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); 442extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
433extern int md_check_no_bitmap(mddev_t *mddev); 443extern int md_check_no_bitmap(mddev_t *mddev);
444extern int md_integrity_register(mddev_t *mddev);
445void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
434 446
435#endif /* _MD_MD_H */ 447#endif /* _MD_MD_H */
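The personality hunks that follow (multipath, raid0, raid1, raid10) all wire these two new helpers up the same way; condensed into one sketch (illustrative ordering, not any single personality verbatim):

	/* In the personality's run() method, once every component device has
	 * been attached and its queue limits stacked: */
	md_integrity_register(mddev);

	/* In the hot-add path, after the new rdev has been published; this may
	 * disable integrity if the newcomer's profile does not match: */
	md_integrity_add_rdev(rdev, mddev);

	/* In the hot-remove path, after the rdev is gone, so the common profile
	 * can be re-established from the disks that remain: */
	md_integrity_register(mddev);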
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index cbe368fa6598..7140909f6662 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
294 for (path = first; path <= last; path++) 294 for (path = first; path <= last; path++)
295 if ((p=conf->multipaths+path)->rdev == NULL) { 295 if ((p=conf->multipaths+path)->rdev == NULL) {
296 q = rdev->bdev->bd_disk->queue; 296 q = rdev->bdev->bd_disk->queue;
297 blk_queue_stack_limits(mddev->queue, q); 297 disk_stack_limits(mddev->gendisk, rdev->bdev,
298 rdev->data_offset << 9);
298 299
299 /* as we don't honour merge_bvec_fn, we must never risk 300 /* as we don't honour merge_bvec_fn, we must never risk
300 * violating it, so limit ->max_sector to one PAGE, as 301 * violating it, so limit ->max_sector to one PAGE, as
@@ -312,6 +313,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
312 set_bit(In_sync, &rdev->flags); 313 set_bit(In_sync, &rdev->flags);
313 rcu_assign_pointer(p->rdev, rdev); 314 rcu_assign_pointer(p->rdev, rdev);
314 err = 0; 315 err = 0;
316 md_integrity_add_rdev(rdev, mddev);
315 break; 317 break;
316 } 318 }
317 319
@@ -344,7 +346,9 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
344 /* lost the race, try later */ 346 /* lost the race, try later */
345 err = -EBUSY; 347 err = -EBUSY;
346 p->rdev = rdev; 348 p->rdev = rdev;
349 goto abort;
347 } 350 }
351 md_integrity_register(mddev);
348 } 352 }
349abort: 353abort:
350 354
@@ -463,9 +467,9 @@ static int multipath_run (mddev_t *mddev)
463 467
464 disk = conf->multipaths + disk_idx; 468 disk = conf->multipaths + disk_idx;
465 disk->rdev = rdev; 469 disk->rdev = rdev;
470 disk_stack_limits(mddev->gendisk, rdev->bdev,
471 rdev->data_offset << 9);
466 472
467 blk_queue_stack_limits(mddev->queue,
468 rdev->bdev->bd_disk->queue);
469 /* as we don't honour merge_bvec_fn, we must never risk 473 /* as we don't honour merge_bvec_fn, we must never risk
470 * violating it, not that we ever expect a device with 474 * violating it, not that we ever expect a device with
471 * a merge_bvec_fn to be involved in multipath */ 475 * a merge_bvec_fn to be involved in multipath */
@@ -518,7 +522,7 @@ static int multipath_run (mddev_t *mddev)
518 mddev->queue->unplug_fn = multipath_unplug; 522 mddev->queue->unplug_fn = multipath_unplug;
519 mddev->queue->backing_dev_info.congested_fn = multipath_congested; 523 mddev->queue->backing_dev_info.congested_fn = multipath_congested;
520 mddev->queue->backing_dev_info.congested_data = mddev; 524 mddev->queue->backing_dev_info.congested_data = mddev;
521 525 md_integrity_register(mddev);
522 return 0; 526 return 0;
523 527
524out_free_conf: 528out_free_conf:
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ab4a489d8695..898e2bdfee47 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
170 } 170 }
171 dev[j] = rdev1; 171 dev[j] = rdev1;
172 172
173 blk_queue_stack_limits(mddev->queue, 173 disk_stack_limits(mddev->gendisk, rdev1->bdev,
174 rdev1->bdev->bd_disk->queue); 174 rdev1->data_offset << 9);
175 /* as we don't honour merge_bvec_fn, we must never risk 175 /* as we don't honour merge_bvec_fn, we must never risk
176 * violating it, so limit ->max_sector to one PAGE, as 176 * violating it, so limit ->max_sector to one PAGE, as
177 * a one page request is never in violation. 177 * a one page request is never in violation.
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
250 mddev->chunk_sectors << 9); 250 mddev->chunk_sectors << 9);
251 goto abort; 251 goto abort;
252 } 252 }
253
254 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
255 blk_queue_io_opt(mddev->queue,
256 (mddev->chunk_sectors << 9) * mddev->raid_disks);
257
253 printk(KERN_INFO "raid0: done.\n"); 258 printk(KERN_INFO "raid0: done.\n");
254 mddev->private = conf; 259 mddev->private = conf;
255 return 0; 260 return 0;
@@ -346,6 +351,7 @@ static int raid0_run(mddev_t *mddev)
346 351
347 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); 352 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
348 dump_zones(mddev); 353 dump_zones(mddev);
354 md_integrity_register(mddev);
349 return 0; 355 return 0;
350} 356}
351 357
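As a concrete reading of the new raid0 topology hints: for a hypothetical four-disk array with 128-sector (64 KiB) chunks, blk_queue_io_min() is given 128 << 9 = 65,536 bytes and blk_queue_io_opt() is given 65,536 * 4 = 262,144 bytes, i.e. the minimum preferred I/O is one chunk and the optimal I/O is one full stripe across all members.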
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89939a7aef57..8726fd7ebce5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1123 for (mirror = first; mirror <= last; mirror++) 1123 for (mirror = first; mirror <= last; mirror++)
1124 if ( !(p=conf->mirrors+mirror)->rdev) { 1124 if ( !(p=conf->mirrors+mirror)->rdev) {
1125 1125
1126 blk_queue_stack_limits(mddev->queue, 1126 disk_stack_limits(mddev->gendisk, rdev->bdev,
1127 rdev->bdev->bd_disk->queue); 1127 rdev->data_offset << 9);
1128 /* as we don't honour merge_bvec_fn, we must never risk 1128 /* as we don't honour merge_bvec_fn, we must never risk
1129 * violating it, so limit ->max_sector to one PAGE, as 1129 * violating it, so limit ->max_sector to one PAGE, as
1130 * a one page request is never in violation. 1130 * a one page request is never in violation.
@@ -1144,7 +1144,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1144 rcu_assign_pointer(p->rdev, rdev); 1144 rcu_assign_pointer(p->rdev, rdev);
1145 break; 1145 break;
1146 } 1146 }
1147 1147 md_integrity_add_rdev(rdev, mddev);
1148 print_conf(conf); 1148 print_conf(conf);
1149 return err; 1149 return err;
1150} 1150}
@@ -1178,7 +1178,9 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
1178 /* lost the race, try later */ 1178 /* lost the race, try later */
1179 err = -EBUSY; 1179 err = -EBUSY;
1180 p->rdev = rdev; 1180 p->rdev = rdev;
1181 goto abort;
1181 } 1182 }
1183 md_integrity_register(mddev);
1182 } 1184 }
1183abort: 1185abort:
1184 1186
@@ -1988,9 +1990,8 @@ static int run(mddev_t *mddev)
1988 disk = conf->mirrors + disk_idx; 1990 disk = conf->mirrors + disk_idx;
1989 1991
1990 disk->rdev = rdev; 1992 disk->rdev = rdev;
1991 1993 disk_stack_limits(mddev->gendisk, rdev->bdev,
1992 blk_queue_stack_limits(mddev->queue, 1994 rdev->data_offset << 9);
1993 rdev->bdev->bd_disk->queue);
1994 /* as we don't honour merge_bvec_fn, we must never risk 1995 /* as we don't honour merge_bvec_fn, we must never risk
1995 * violating it, so limit ->max_sector to one PAGE, as 1996 * violating it, so limit ->max_sector to one PAGE, as
1996 * a one page request is never in violation. 1997 * a one page request is never in violation.
@@ -2068,7 +2069,7 @@ static int run(mddev_t *mddev)
2068 mddev->queue->unplug_fn = raid1_unplug; 2069 mddev->queue->unplug_fn = raid1_unplug;
2069 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2070 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2070 mddev->queue->backing_dev_info.congested_data = mddev; 2071 mddev->queue->backing_dev_info.congested_data = mddev;
2071 2072 md_integrity_register(mddev);
2072 return 0; 2073 return 0;
2073 2074
2074out_no_mem: 2075out_no_mem:
@@ -2133,6 +2134,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
2133 return -EINVAL; 2134 return -EINVAL;
2134 set_capacity(mddev->gendisk, mddev->array_sectors); 2135 set_capacity(mddev->gendisk, mddev->array_sectors);
2135 mddev->changed = 1; 2136 mddev->changed = 1;
2137 revalidate_disk(mddev->gendisk);
2136 if (sectors > mddev->dev_sectors && 2138 if (sectors > mddev->dev_sectors &&
2137 mddev->recovery_cp == MaxSector) { 2139 mddev->recovery_cp == MaxSector) {
2138 mddev->recovery_cp = mddev->dev_sectors; 2140 mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..3d9020cf6f6e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1151 for ( ; mirror <= last ; mirror++) 1151 for ( ; mirror <= last ; mirror++)
1152 if ( !(p=conf->mirrors+mirror)->rdev) { 1152 if ( !(p=conf->mirrors+mirror)->rdev) {
1153 1153
1154 blk_queue_stack_limits(mddev->queue, 1154 disk_stack_limits(mddev->gendisk, rdev->bdev,
1155 rdev->bdev->bd_disk->queue); 1155 rdev->data_offset << 9);
1156 /* as we don't honour merge_bvec_fn, we must never risk 1156 /* as we don't honour merge_bvec_fn, we must never risk
1157 * violating it, so limit ->max_sector to one PAGE, as 1157 * violating it, so limit ->max_sector to one PAGE, as
1158 * a one page request is never in violation. 1158 * a one page request is never in violation.
@@ -1170,6 +1170,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1170 break; 1170 break;
1171 } 1171 }
1172 1172
1173 md_integrity_add_rdev(rdev, mddev);
1173 print_conf(conf); 1174 print_conf(conf);
1174 return err; 1175 return err;
1175} 1176}
@@ -1203,7 +1204,9 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
1203 /* lost the race, try later */ 1204 /* lost the race, try later */
1204 err = -EBUSY; 1205 err = -EBUSY;
1205 p->rdev = rdev; 1206 p->rdev = rdev;
1207 goto abort;
1206 } 1208 }
1209 md_integrity_register(mddev);
1207 } 1210 }
1208abort: 1211abort:
1209 1212
@@ -2044,7 +2047,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2044static int run(mddev_t *mddev) 2047static int run(mddev_t *mddev)
2045{ 2048{
2046 conf_t *conf; 2049 conf_t *conf;
2047 int i, disk_idx; 2050 int i, disk_idx, chunk_size;
2048 mirror_info_t *disk; 2051 mirror_info_t *disk;
2049 mdk_rdev_t *rdev; 2052 mdk_rdev_t *rdev;
2050 int nc, fc, fo; 2053 int nc, fc, fo;
@@ -2130,6 +2133,14 @@ static int run(mddev_t *mddev)
2130 spin_lock_init(&conf->device_lock); 2133 spin_lock_init(&conf->device_lock);
2131 mddev->queue->queue_lock = &conf->device_lock; 2134 mddev->queue->queue_lock = &conf->device_lock;
2132 2135
2136 chunk_size = mddev->chunk_sectors << 9;
2137 blk_queue_io_min(mddev->queue, chunk_size);
2138 if (conf->raid_disks % conf->near_copies)
2139 blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2140 else
2141 blk_queue_io_opt(mddev->queue, chunk_size *
2142 (conf->raid_disks / conf->near_copies));
2143
2133 list_for_each_entry(rdev, &mddev->disks, same_set) { 2144 list_for_each_entry(rdev, &mddev->disks, same_set) {
2134 disk_idx = rdev->raid_disk; 2145 disk_idx = rdev->raid_disk;
2135 if (disk_idx >= mddev->raid_disks 2146 if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2149,8 @@ static int run(mddev_t *mddev)
2138 disk = conf->mirrors + disk_idx; 2149 disk = conf->mirrors + disk_idx;
2139 2150
2140 disk->rdev = rdev; 2151 disk->rdev = rdev;
2141 2152 disk_stack_limits(mddev->gendisk, rdev->bdev,
2142 blk_queue_stack_limits(mddev->queue, 2153 rdev->data_offset << 9);
2143 rdev->bdev->bd_disk->queue);
2144 /* as we don't honour merge_bvec_fn, we must never risk 2154 /* as we don't honour merge_bvec_fn, we must never risk
2145 * violating it, so limit ->max_sector to one PAGE, as 2155 * violating it, so limit ->max_sector to one PAGE, as
2146 * a one page request is never in violation. 2156 * a one page request is never in violation.
@@ -2218,6 +2228,7 @@ static int run(mddev_t *mddev)
2218 2228
2219 if (conf->near_copies < mddev->raid_disks) 2229 if (conf->near_copies < mddev->raid_disks)
2220 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 2230 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2231 md_integrity_register(mddev);
2221 return 0; 2232 return 0;
2222 2233
2223out_free_conf: 2234out_free_conf:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cac6f4d3a143..9b00a229015a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3911,13 +3911,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
3911 goto retry; 3911 goto retry;
3912 } 3912 }
3913 } 3913 }
3914 /* FIXME what if we get a false positive because these 3914
3915 * are being updated. 3915 if (bio_data_dir(bi) == WRITE &&
3916 */ 3916 logical_sector >= mddev->suspend_lo &&
3917 if (logical_sector >= mddev->suspend_lo &&
3918 logical_sector < mddev->suspend_hi) { 3917 logical_sector < mddev->suspend_hi) {
3919 release_stripe(sh); 3918 release_stripe(sh);
3920 schedule(); 3919 /* As the suspend_* range is controlled by
3920 * userspace, we want an interruptible
3921 * wait.
3922 */
3923 flush_signals(current);
3924 prepare_to_wait(&conf->wait_for_overlap,
3925 &w, TASK_INTERRUPTIBLE);
3926 if (logical_sector >= mddev->suspend_lo &&
3927 logical_sector < mddev->suspend_hi)
3928 schedule();
3921 goto retry; 3929 goto retry;
3922 } 3930 }
3923 3931
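The rewritten suspend check above registers the writer on the wait queue with prepare_to_wait(TASK_INTERRUPTIBLE), re-tests the suspend range, and only then calls schedule(), so a wakeup arriving between the test and the sleep cannot be lost. A userspace analog (pthreads, not the kernel wait-queue API) gets the same guarantee by re-checking the condition in a loop around pthread_cond_wait() under the mutex; the names and range variables here are illustrative:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static unsigned long long suspend_lo, suspend_hi;  /* illustrative range */

/* Block while 'sector' falls inside the suspended range. */
static void wait_until_outside(unsigned long long sector)
{
        pthread_mutex_lock(&lock);
        while (sector >= suspend_lo && sector < suspend_hi)
                pthread_cond_wait(&cond, &lock);    /* re-checked on wakeup */
        pthread_mutex_unlock(&lock);
}

/* Userspace-controlled update of the range, waking any blocked writer. */
static void update_suspend_range(unsigned long long lo, unsigned long long hi)
{
        pthread_mutex_lock(&lock);
        suspend_lo = lo;
        suspend_hi = hi;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        update_suspend_range(0, 0);     /* empty range: nothing is blocked */
        wait_until_outside(128);        /* returns immediately */
        return 0;
}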
@@ -3989,7 +3997,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3989 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 3997 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3990 sector_nr = raid5_size(mddev, 0, 0) 3998 sector_nr = raid5_size(mddev, 0, 0)
3991 - conf->reshape_progress; 3999 - conf->reshape_progress;
3992 } else if (mddev->delta_disks > 0 && 4000 } else if (mddev->delta_disks >= 0 &&
3993 conf->reshape_progress > 0) 4001 conf->reshape_progress > 0)
3994 sector_nr = conf->reshape_progress; 4002 sector_nr = conf->reshape_progress;
3995 sector_div(sector_nr, new_data_disks); 4003 sector_div(sector_nr, new_data_disks);
@@ -4203,6 +4211,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4203 return 0; 4211 return 0;
4204 } 4212 }
4205 4213
4214 /* Allow raid5_quiesce to complete */
4215 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4216
4206 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4217 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4207 return reshape_request(mddev, sector_nr, skipped); 4218 return reshape_request(mddev, sector_nr, skipped);
4208 4219
@@ -4803,7 +4814,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4803static int run(mddev_t *mddev) 4814static int run(mddev_t *mddev)
4804{ 4815{
4805 raid5_conf_t *conf; 4816 raid5_conf_t *conf;
4806 int working_disks = 0; 4817 int working_disks = 0, chunk_size;
4807 mdk_rdev_t *rdev; 4818 mdk_rdev_t *rdev;
4808 4819
4809 if (mddev->recovery_cp != MaxSector) 4820 if (mddev->recovery_cp != MaxSector)
@@ -4844,7 +4855,26 @@ static int run(mddev_t *mddev)
4844 (old_disks-max_degraded)); 4855 (old_disks-max_degraded));
4845 /* here_old is the first stripe that we might need to read 4856 /* here_old is the first stripe that we might need to read
4846 * from */ 4857 * from */
4847 if (here_new >= here_old) { 4858 if (mddev->delta_disks == 0) {
4859 /* We cannot be sure it is safe to start an in-place
 4860 * reshape. It is only safe if user-space is monitoring
4861 * and taking constant backups.
4862 * mdadm always starts a situation like this in
4863 * readonly mode so it can take control before
4864 * allowing any writes. So just check for that.
4865 */
4866 if ((here_new * mddev->new_chunk_sectors !=
4867 here_old * mddev->chunk_sectors) ||
4868 mddev->ro == 0) {
4869 printk(KERN_ERR "raid5: in-place reshape must be started"
4870 " in read-only mode - aborting\n");
4871 return -EINVAL;
4872 }
4873 } else if (mddev->delta_disks < 0
4874 ? (here_new * mddev->new_chunk_sectors <=
4875 here_old * mddev->chunk_sectors)
4876 : (here_new * mddev->new_chunk_sectors >=
4877 here_old * mddev->chunk_sectors)) {
4848 /* Reading from the same stripe as writing to - bad */ 4878 /* Reading from the same stripe as writing to - bad */
4849 printk(KERN_ERR "raid5: reshape_position too early for " 4879 printk(KERN_ERR "raid5: reshape_position too early for "
4850 "auto-recovery - aborting.\n"); 4880 "auto-recovery - aborting.\n");
@@ -4958,6 +4988,14 @@ static int run(mddev_t *mddev)
4958 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4988 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
4959 4989
4960 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4990 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4991 chunk_size = mddev->chunk_sectors << 9;
4992 blk_queue_io_min(mddev->queue, chunk_size);
4993 blk_queue_io_opt(mddev->queue, chunk_size *
4994 (conf->raid_disks - conf->max_degraded));
4995
4996 list_for_each_entry(rdev, &mddev->disks, same_set)
4997 disk_stack_limits(mddev->gendisk, rdev->bdev,
4998 rdev->data_offset << 9);
4961 4999
4962 return 0; 5000 return 0;
4963abort: 5001abort:
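raid5's run() gains the same geometry export as raid10 above, but here the optimal I/O size is one full data stripe: the chunk size times the number of data-bearing devices (raid_disks minus the max_degraded parity devices). A hypothetical restatement:

#include <stdio.h>

/* Illustrative helper, not the kernel's: one full stripe of data. */
static unsigned int raid5_io_opt(unsigned int chunk_bytes,
                                 unsigned int raid_disks,
                                 unsigned int max_degraded)
{
        return chunk_bytes * (raid_disks - max_degraded);
}

int main(void)
{
        /* 64 KiB chunks, 6 devices, RAID6-style max_degraded of 2 -> 256 KiB */
        printf("io_opt=%u\n", raid5_io_opt(64 * 1024, 6, 2));
        return 0;
}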
@@ -5185,6 +5223,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
5185 return -EINVAL; 5223 return -EINVAL;
5186 set_capacity(mddev->gendisk, mddev->array_sectors); 5224 set_capacity(mddev->gendisk, mddev->array_sectors);
5187 mddev->changed = 1; 5225 mddev->changed = 1;
5226 revalidate_disk(mddev->gendisk);
5188 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { 5227 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
5189 mddev->recovery_cp = mddev->dev_sectors; 5228 mddev->recovery_cp = mddev->dev_sectors;
5190 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5229 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5330,7 +5369,7 @@ static int raid5_start_reshape(mddev_t *mddev)
5330 spin_unlock_irqrestore(&conf->device_lock, flags); 5369 spin_unlock_irqrestore(&conf->device_lock, flags);
5331 } 5370 }
5332 mddev->raid_disks = conf->raid_disks; 5371 mddev->raid_disks = conf->raid_disks;
5333 mddev->reshape_position = 0; 5372 mddev->reshape_position = conf->reshape_progress;
5334 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5373 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5335 5374
5336 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5375 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -5385,7 +5424,6 @@ static void end_reshape(raid5_conf_t *conf)
5385 */ 5424 */
5386static void raid5_finish_reshape(mddev_t *mddev) 5425static void raid5_finish_reshape(mddev_t *mddev)
5387{ 5426{
5388 struct block_device *bdev;
5389 raid5_conf_t *conf = mddev->private; 5427 raid5_conf_t *conf = mddev->private;
5390 5428
5391 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5429 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -5394,15 +5432,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
5394 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5432 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5395 set_capacity(mddev->gendisk, mddev->array_sectors); 5433 set_capacity(mddev->gendisk, mddev->array_sectors);
5396 mddev->changed = 1; 5434 mddev->changed = 1;
5397 5435 revalidate_disk(mddev->gendisk);
5398 bdev = bdget_disk(mddev->gendisk, 0);
5399 if (bdev) {
5400 mutex_lock(&bdev->bd_inode->i_mutex);
5401 i_size_write(bdev->bd_inode,
5402 (loff_t)mddev->array_sectors << 9);
5403 mutex_unlock(&bdev->bd_inode->i_mutex);
5404 bdput(bdev);
5405 }
5406 } else { 5436 } else {
5407 int d; 5437 int d;
5408 mddev->degraded = conf->raid_disks; 5438 mddev->degraded = conf->raid_disks;
@@ -5413,8 +5443,15 @@ static void raid5_finish_reshape(mddev_t *mddev)
5413 mddev->degraded--; 5443 mddev->degraded--;
5414 for (d = conf->raid_disks ; 5444 for (d = conf->raid_disks ;
5415 d < conf->raid_disks - mddev->delta_disks; 5445 d < conf->raid_disks - mddev->delta_disks;
5416 d++) 5446 d++) {
5417 raid5_remove_disk(mddev, d); 5447 mdk_rdev_t *rdev = conf->disks[d].rdev;
5448 if (rdev && raid5_remove_disk(mddev, d) == 0) {
5449 char nm[20];
5450 sprintf(nm, "rd%d", rdev->raid_disk);
5451 sysfs_remove_link(&mddev->kobj, nm);
5452 rdev->raid_disk = -1;
5453 }
5454 }
5418 } 5455 }
5419 mddev->layout = conf->algorithm; 5456 mddev->layout = conf->algorithm;
5420 mddev->chunk_sectors = conf->chunk_sectors; 5457 mddev->chunk_sectors = conf->chunk_sectors;
@@ -5434,12 +5471,18 @@ static void raid5_quiesce(mddev_t *mddev, int state)
5434 5471
5435 case 1: /* stop all writes */ 5472 case 1: /* stop all writes */
5436 spin_lock_irq(&conf->device_lock); 5473 spin_lock_irq(&conf->device_lock);
5437 conf->quiesce = 1; 5474 /* '2' tells resync/reshape to pause so that all
5475 * active stripes can drain
5476 */
5477 conf->quiesce = 2;
5438 wait_event_lock_irq(conf->wait_for_stripe, 5478 wait_event_lock_irq(conf->wait_for_stripe,
5439 atomic_read(&conf->active_stripes) == 0 && 5479 atomic_read(&conf->active_stripes) == 0 &&
5440 atomic_read(&conf->active_aligned_reads) == 0, 5480 atomic_read(&conf->active_aligned_reads) == 0,
5441 conf->device_lock, /* nothing */); 5481 conf->device_lock, /* nothing */);
5482 conf->quiesce = 1;
5442 spin_unlock_irq(&conf->device_lock); 5483 spin_unlock_irq(&conf->device_lock);
5484 /* allow reshape to continue */
5485 wake_up(&conf->wait_for_overlap);
5443 break; 5486 break;
5444 5487
5445 case 0: /* re-enable writes */ 5488 case 0: /* re-enable writes */
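The quiesce change above is a two-phase handshake: quiesce is raised to 2 so resync/reshape pauses (sync_request now waits for quiesce != 2), the active stripes are drained, and only then does quiesce drop to 1 and wake_up() let the background thread continue. A userspace sketch of the same handshake (pthreads, illustrative names, not kernel code):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  resume  = PTHREAD_COND_INITIALIZER;
static int quiesce;      /* 0 = running, 1 = writes stopped, 2 = draining */
static int active_ops;   /* in-flight stripe operations */

static void stop_all_writes(void)
{
        pthread_mutex_lock(&lock);
        quiesce = 2;                         /* pause resync/reshape as well */
        while (active_ops > 0)
                pthread_cond_wait(&drained, &lock);
        quiesce = 1;                         /* writes stay blocked... */
        pthread_cond_broadcast(&resume);     /* ...but background work may run */
        pthread_mutex_unlock(&lock);
}

static void background_work_step(void)
{
        pthread_mutex_lock(&lock);
        while (quiesce == 2)                 /* only the transient '2' blocks us */
                pthread_cond_wait(&resume, &lock);
        pthread_mutex_unlock(&lock);
        /* ... perform one unit of resync/reshape work ... */
}

int main(void)
{
        stop_all_writes();
        background_work_step();
        return 0;
}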
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index b6da9c3873fe..aa20ce8cc668 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -1096,8 +1096,19 @@ static int xc2028_set_params(struct dvb_frontend *fe,
1096 } 1096 }
1097 1097
1098 /* All S-code tables need a 200kHz shift */ 1098 /* All S-code tables need a 200kHz shift */
1099 if (priv->ctrl.demod) 1099 if (priv->ctrl.demod) {
1100 demod = priv->ctrl.demod + 200; 1100 demod = priv->ctrl.demod + 200;
1101 /*
1102 * The DTV7 S-code table needs a 700 kHz shift.
1103 * Thanks to Terry Wu <terrywu2009@gmail.com> for reporting this
1104 *
1105 * DTV7 is only used in Australia. Germany or Italy may also
1106 * use this firmware after initialization, but a tune to a UHF
1107 * channel should then cause DTV78 to be used.
1108 */
1109 if (type & DTV7)
1110 demod += 500;
1111 }
1101 1112
1102 return generic_set_freq(fe, p->frequency, 1113 return generic_set_freq(fe, p->frequency,
1103 T_DIGITAL_TV, type, 0, demod); 1114 T_DIGITAL_TV, type, 0, demod);
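The added branch stacks a further 500 kHz onto the unconditional 200 kHz S-code shift, giving the 700 kHz total the comment describes for the DTV7 firmware. A trivial sketch of the resulting offset (illustrative values only):

#include <stdio.h>

/* Illustrative only: the IF shift handed to generic_set_freq(), in kHz. */
static int scode_shift_khz(int demod_khz, int is_dtv7)
{
        int demod = demod_khz + 200;   /* all S-code tables: +200 kHz */
        if (is_dtv7)
                demod += 500;          /* DTV7 table: 700 kHz in total */
        return demod;
}

int main(void)
{
        printf("%d %d\n", scode_shift_khz(4000, 0), scode_shift_khz(4000, 1));
        return 0;
}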
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index efb4a6c2b57a..9a6307a347b2 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -20,8 +20,14 @@
20#include "tuner-simple.h" 20#include "tuner-simple.h"
21#include "stv0297.h" 21#include "stv0297.h"
22 22
23
24/* Can we use the specified front-end? Remember that if we are compiled
25 * into the kernel we can't call code that's in modules. */
26#define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \
27 (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE)))
28
23/* lnb control */ 29/* lnb control */
24#if defined(CONFIG_DVB_MT312_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) 30#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
25static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) 31static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
26{ 32{
27 struct flexcop_device *fc = fe->dvb->priv; 33 struct flexcop_device *fc = fe->dvb->priv;
@@ -49,8 +55,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage
49} 55}
50#endif 56#endif
51 57
52#if defined(CONFIG_DVB_S5H1420_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) \ 58#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
53 || defined(CONFIG_DVB_MT312_MODULE)
54static int flexcop_sleep(struct dvb_frontend* fe) 59static int flexcop_sleep(struct dvb_frontend* fe)
55{ 60{
56 struct flexcop_device *fc = fe->dvb->priv; 61 struct flexcop_device *fc = fe->dvb->priv;
@@ -61,7 +66,7 @@ static int flexcop_sleep(struct dvb_frontend* fe)
61#endif 66#endif
62 67
63/* SkyStar2 DVB-S rev 2.3 */ 68/* SkyStar2 DVB-S rev 2.3 */
64#if defined(CONFIG_DVB_MT312_MODULE) 69#if FE_SUPPORTED(MT312)
65static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) 70static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
66{ 71{
67/* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ 72/* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */
@@ -193,10 +198,12 @@ static int skystar2_rev23_attach(struct flexcop_device *fc,
193 } 198 }
194 return 0; 199 return 0;
195} 200}
201#else
202#define skystar2_rev23_attach NULL
196#endif 203#endif
197 204
198/* SkyStar2 DVB-S rev 2.6 */ 205/* SkyStar2 DVB-S rev 2.6 */
199#if defined(CONFIG_DVB_STV0299_MODULE) 206#if FE_SUPPORTED(STV0299)
200static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe, 207static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe,
201 u32 srate, u32 ratio) 208 u32 srate, u32 ratio)
202{ 209{
@@ -321,10 +328,12 @@ static int skystar2_rev26_attach(struct flexcop_device *fc,
321 } 328 }
322 return 0; 329 return 0;
323} 330}
331#else
332#define skystar2_rev26_attach NULL
324#endif 333#endif
325 334
326/* SkyStar2 DVB-S rev 2.7 */ 335/* SkyStar2 DVB-S rev 2.7 */
327#if defined(CONFIG_DVB_S5H1420_MODULE) 336#if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000)
328static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { 337static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
329 .demod_address = 0x53, 338 .demod_address = 0x53,
330 .invert = 1, 339 .invert = 1,
@@ -385,10 +394,12 @@ fail:
385 fc->fc_i2c_adap[0].no_base_addr = 0; 394 fc->fc_i2c_adap[0].no_base_addr = 0;
386 return 0; 395 return 0;
387} 396}
397#else
398#define skystar2_rev27_attach NULL
388#endif 399#endif
389 400
390/* SkyStar2 rev 2.8 */ 401/* SkyStar2 rev 2.8 */
391#if defined(CONFIG_DVB_CX24123_MODULE) 402#if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113)
392static struct cx24123_config skystar2_rev2_8_cx24123_config = { 403static struct cx24123_config skystar2_rev2_8_cx24123_config = {
393 .demod_address = 0x55, 404 .demod_address = 0x55,
394 .dont_use_pll = 1, 405 .dont_use_pll = 1,
@@ -433,10 +444,12 @@ static int skystar2_rev28_attach(struct flexcop_device *fc,
433 * IR-receiver (PIC16F818) - but the card has no input for that ??? */ 444 * IR-receiver (PIC16F818) - but the card has no input for that ??? */
434 return 1; 445 return 1;
435} 446}
447#else
448#define skystar2_rev28_attach NULL
436#endif 449#endif
437 450
438/* AirStar DVB-T */ 451/* AirStar DVB-T */
439#if defined(CONFIG_DVB_MT352_MODULE) 452#if FE_SUPPORTED(MT352)
440static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe) 453static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe)
441{ 454{
442 static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d }; 455 static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d };
@@ -495,10 +508,12 @@ static int airstar_dvbt_attach(struct flexcop_device *fc,
495 } 508 }
496 return 0; 509 return 0;
497} 510}
511#else
512#define airstar_dvbt_attach NULL
498#endif 513#endif
499 514
500/* AirStar ATSC 1st generation */ 515/* AirStar ATSC 1st generation */
501#if defined(CONFIG_DVB_BCM3510_MODULE) 516#if FE_SUPPORTED(BCM3510)
502static int flexcop_fe_request_firmware(struct dvb_frontend *fe, 517static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
503 const struct firmware **fw, char* name) 518 const struct firmware **fw, char* name)
504{ 519{
@@ -517,10 +532,12 @@ static int airstar_atsc1_attach(struct flexcop_device *fc,
517 fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c); 532 fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c);
518 return fc->fe != NULL; 533 return fc->fe != NULL;
519} 534}
535#else
536#define airstar_atsc1_attach NULL
520#endif 537#endif
521 538
522/* AirStar ATSC 2nd generation */ 539/* AirStar ATSC 2nd generation */
523#if defined(CONFIG_DVB_NXT200X_MODULE) 540#if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL)
524static struct nxt200x_config samsung_tbmv_config = { 541static struct nxt200x_config samsung_tbmv_config = {
525 .demod_address = 0x0a, 542 .demod_address = 0x0a,
526}; 543};
@@ -535,10 +552,12 @@ static int airstar_atsc2_attach(struct flexcop_device *fc,
535 return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, 552 return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL,
536 DVB_PLL_SAMSUNG_TBMV); 553 DVB_PLL_SAMSUNG_TBMV);
537} 554}
555#else
556#define airstar_atsc2_attach NULL
538#endif 557#endif
539 558
540/* AirStar ATSC 3rd generation */ 559/* AirStar ATSC 3rd generation */
541#if defined(CONFIG_DVB_LGDT330X_MODULE) 560#if FE_SUPPORTED(LGDT330X)
542static struct lgdt330x_config air2pc_atsc_hd5000_config = { 561static struct lgdt330x_config air2pc_atsc_hd5000_config = {
543 .demod_address = 0x59, 562 .demod_address = 0x59,
544 .demod_chip = LGDT3303, 563 .demod_chip = LGDT3303,
@@ -556,10 +575,12 @@ static int airstar_atsc3_attach(struct flexcop_device *fc,
556 return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61, 575 return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61,
557 TUNER_LG_TDVS_H06XF); 576 TUNER_LG_TDVS_H06XF);
558} 577}
578#else
579#define airstar_atsc3_attach NULL
559#endif 580#endif
560 581
561/* CableStar2 DVB-C */ 582/* CableStar2 DVB-C */
562#if defined(CONFIG_DVB_STV0297_MODULE) 583#if FE_SUPPORTED(STV0297)
563static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe, 584static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe,
564 struct dvb_frontend_parameters *fep) 585 struct dvb_frontend_parameters *fep)
565{ 586{
@@ -698,39 +719,23 @@ static int cablestar2_attach(struct flexcop_device *fc,
698 fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; 719 fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params;
699 return 1; 720 return 1;
700} 721}
722#else
723#define cablestar2_attach NULL
701#endif 724#endif
702 725
703static struct { 726static struct {
704 flexcop_device_type_t type; 727 flexcop_device_type_t type;
705 int (*attach)(struct flexcop_device *, struct i2c_adapter *); 728 int (*attach)(struct flexcop_device *, struct i2c_adapter *);
706} flexcop_frontends[] = { 729} flexcop_frontends[] = {
707#if defined(CONFIG_DVB_S5H1420_MODULE)
708 { FC_SKY_REV27, skystar2_rev27_attach }, 730 { FC_SKY_REV27, skystar2_rev27_attach },
709#endif
710#if defined(CONFIG_DVB_CX24123_MODULE)
711 { FC_SKY_REV28, skystar2_rev28_attach }, 731 { FC_SKY_REV28, skystar2_rev28_attach },
712#endif
713#if defined(CONFIG_DVB_STV0299_MODULE)
714 { FC_SKY_REV26, skystar2_rev26_attach }, 732 { FC_SKY_REV26, skystar2_rev26_attach },
715#endif
716#if defined(CONFIG_DVB_MT352_MODULE)
717 { FC_AIR_DVBT, airstar_dvbt_attach }, 733 { FC_AIR_DVBT, airstar_dvbt_attach },
718#endif
719#if defined(CONFIG_DVB_NXT200X_MODULE)
720 { FC_AIR_ATSC2, airstar_atsc2_attach }, 734 { FC_AIR_ATSC2, airstar_atsc2_attach },
721#endif
722#if defined(CONFIG_DVB_LGDT330X_MODULE)
723 { FC_AIR_ATSC3, airstar_atsc3_attach }, 735 { FC_AIR_ATSC3, airstar_atsc3_attach },
724#endif
725#if defined(CONFIG_DVB_BCM3510_MODULE)
726 { FC_AIR_ATSC1, airstar_atsc1_attach }, 736 { FC_AIR_ATSC1, airstar_atsc1_attach },
727#endif
728#if defined(CONFIG_DVB_STV0297_MODULE)
729 { FC_CABLE, cablestar2_attach }, 737 { FC_CABLE, cablestar2_attach },
730#endif
731#if defined(CONFIG_DVB_MT312_MODULE)
732 { FC_SKY_REV23, skystar2_rev23_attach }, 738 { FC_SKY_REV23, skystar2_rev23_attach },
733#endif
734}; 739};
735 740
736/* try to figure out the frontend */ 741/* try to figure out the frontend */
@@ -738,6 +743,8 @@ int flexcop_frontend_init(struct flexcop_device *fc)
738{ 743{
739 int i; 744 int i;
740 for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) { 745 for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) {
746 if (!flexcop_frontends[i].attach)
747 continue;
741 /* type needs to be set before, because of some workarounds 748 /* type needs to be set before, because of some workarounds
742 * done based on the probed card type */ 749 * done based on the probed card type */
743 fc->dev_type = flexcop_frontends[i].type; 750 fc->dev_type = flexcop_frontends[i].type;
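The flexcop changes above collapse the per-entry #ifdef forest into one pattern: FE_SUPPORTED() answers "is this demod reachable from here" for both built-in and module builds, an unsupported attach function is #define'd to NULL, and flexcop_frontend_init() skips NULL entries at runtime. A self-contained sketch of the same structure (the FAKE_CONFIG_* symbols and attach bodies are invented for illustration):

#include <stdio.h>
#include <stddef.h>

#ifdef FAKE_CONFIG_FOO
static int foo_attach(void) { printf("foo attached\n"); return 1; }
#else
#define foo_attach NULL          /* compiled out, table entry stays */
#endif

#ifdef FAKE_CONFIG_BAR
static int bar_attach(void) { printf("bar attached\n"); return 1; }
#else
#define bar_attach NULL
#endif

static struct {
        const char *name;
        int (*attach)(void);
} frontends[] = {
        { "foo", foo_attach },
        { "bar", bar_attach },
};

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(frontends) / sizeof(frontends[0]); i++) {
                if (!frontends[i].attach)    /* unsupported: skip at runtime */
                        continue;
                if (frontends[i].attach())
                        break;               /* first working frontend wins */
        }
        return 0;
}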
diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c
index 4601b059b2b2..0e246eaad05a 100644
--- a/drivers/media/dvb/bt8xx/dst_ca.c
+++ b/drivers/media/dvb/bt8xx/dst_ca.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/smp_lock.h>
24#include <linux/string.h> 25#include <linux/string.h>
25#include <linux/dvb/ca.h> 26#include <linux/dvb/ca.h>
26#include "dvbdev.h" 27#include "dvbdev.h"
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index 79927305e84d..487919bea7ae 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -27,7 +27,6 @@
27#include <linux/poll.h> 27#include <linux/poll.h>
28#include <linux/fs.h> 28#include <linux/fs.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/smp_lock.h>
31 30
32#define DVB_MAJOR 212 31#define DVB_MAJOR 212
33 32
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index 136c5863d81b..12e018b4107d 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -527,6 +527,10 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
527 u8 i, buf[3] = {0, 0, 0}; 527 u8 i, buf[3] = {0, 0, 0};
528 *auto_mode = 0; /* set if parameters are requested to auto set */ 528 *auto_mode = 0; /* set if parameters are requested to auto set */
529 529
 530 /* Try to auto-detect transmission parameters when AUTO is requested or
 531 when the application passes garbage parameters, for compatibility.
 532 MPlayer seems to provide garbage parameters currently. */
533
530 switch (params->transmission_mode) { 534 switch (params->transmission_mode) {
531 case TRANSMISSION_MODE_AUTO: 535 case TRANSMISSION_MODE_AUTO:
532 *auto_mode = 1; 536 *auto_mode = 1;
@@ -536,7 +540,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
536 buf[0] |= (1 << 0); 540 buf[0] |= (1 << 0);
537 break; 541 break;
538 default: 542 default:
539 return -EINVAL; 543 deb_info("%s: invalid transmission_mode\n", __func__);
544 *auto_mode = 1;
540 } 545 }
541 546
542 switch (params->guard_interval) { 547 switch (params->guard_interval) {
@@ -554,7 +559,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
554 buf[0] |= (3 << 2); 559 buf[0] |= (3 << 2);
555 break; 560 break;
556 default: 561 default:
557 return -EINVAL; 562 deb_info("%s: invalid guard_interval\n", __func__);
563 *auto_mode = 1;
558 } 564 }
559 565
560 switch (params->hierarchy_information) { 566 switch (params->hierarchy_information) {
@@ -572,7 +578,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
572 buf[0] |= (3 << 4); 578 buf[0] |= (3 << 4);
573 break; 579 break;
574 default: 580 default:
575 return -EINVAL; 581 deb_info("%s: invalid hierarchy_information\n", __func__);
582 *auto_mode = 1;
576 }; 583 };
577 584
578 switch (params->constellation) { 585 switch (params->constellation) {
@@ -587,7 +594,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
587 buf[1] |= (2 << 6); 594 buf[1] |= (2 << 6);
588 break; 595 break;
589 default: 596 default:
590 return -EINVAL; 597 deb_info("%s: invalid constellation\n", __func__);
598 *auto_mode = 1;
591 } 599 }
592 600
593 /* Use HP. How and which case we can switch to LP? */ 601 /* Use HP. How and which case we can switch to LP? */
@@ -611,7 +619,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
611 buf[2] |= (4 << 0); 619 buf[2] |= (4 << 0);
612 break; 620 break;
613 default: 621 default:
614 return -EINVAL; 622 deb_info("%s: invalid code_rate_HP\n", __func__);
623 *auto_mode = 1;
615 } 624 }
616 625
617 switch (params->code_rate_LP) { 626 switch (params->code_rate_LP) {
@@ -638,7 +647,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
638 if (params->hierarchy_information == HIERARCHY_AUTO) 647 if (params->hierarchy_information == HIERARCHY_AUTO)
639 break; 648 break;
640 default: 649 default:
641 return -EINVAL; 650 deb_info("%s: invalid code_rate_LP\n", __func__);
651 *auto_mode = 1;
642 } 652 }
643 653
644 switch (params->bandwidth) { 654 switch (params->bandwidth) {
@@ -651,7 +661,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
651 buf[1] |= (2 << 2); 661 buf[1] |= (2 << 2);
652 break; 662 break;
653 default: 663 default:
654 return -EINVAL; 664 deb_info("%s: invalid bandwidth\n", __func__);
665 buf[1] |= (2 << 2); /* cannot auto-detect BW, try 8 MHz */
655 } 666 }
656 667
657 /* program */ 668 /* program */
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 68eb4493f991..d8d4214fd65f 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -1,5 +1,6 @@
1config TTPCI_EEPROM 1config TTPCI_EEPROM
2 tristate 2 tristate
3 depends on I2C
3 default n 4 default n
4 5
5config DVB_AV7110 6config DVB_AV7110
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index d1d959ed37b7..8d65c652ba50 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -36,7 +36,6 @@
36#include <linux/fs.h> 36#include <linux/fs.h>
37#include <linux/timer.h> 37#include <linux/timer.h>
38#include <linux/poll.h> 38#include <linux/poll.h>
39#include <linux/smp_lock.h>
40 39
41#include <linux/kernel.h> 40#include <linux/kernel.h>
42#include <linux/sched.h> 41#include <linux/sched.h>
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 837467f93805..575bf9d89419 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -58,6 +58,7 @@
58#include <linux/module.h> 58#include <linux/module.h>
59#include <linux/init.h> 59#include <linux/init.h>
60#include <linux/slab.h> 60#include <linux/slab.h>
61#include <linux/smp_lock.h>
61#include <linux/input.h> 62#include <linux/input.h>
62#include <linux/videodev2.h> 63#include <linux/videodev2.h>
63#include <media/v4l2-device.h> 64#include <media/v4l2-device.h>
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index 640421ceb24a..e85f318b4d2b 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -127,6 +127,7 @@
127#include <linux/module.h> 127#include <linux/module.h>
128#include <linux/init.h> 128#include <linux/init.h>
129#include <linux/slab.h> 129#include <linux/slab.h>
130#include <linux/smp_lock.h>
130#include <linux/input.h> 131#include <linux/input.h>
131#include <linux/usb.h> 132#include <linux/usb.h>
132#include <linux/hid.h> 133#include <linux/hid.h>
@@ -1200,7 +1201,7 @@ static int si470x_fops_release(struct file *file)
1200 video_unregister_device(radio->videodev); 1201 video_unregister_device(radio->videodev);
1201 kfree(radio->buffer); 1202 kfree(radio->buffer);
1202 kfree(radio); 1203 kfree(radio);
1203 goto done; 1204 goto unlock;
1204 } 1205 }
1205 1206
1206 /* stop rds reception */ 1207 /* stop rds reception */
@@ -1213,9 +1214,8 @@ static int si470x_fops_release(struct file *file)
1213 retval = si470x_stop(radio); 1214 retval = si470x_stop(radio);
1214 usb_autopm_put_interface(radio->intf); 1215 usb_autopm_put_interface(radio->intf);
1215 } 1216 }
1216 1217unlock:
1217 mutex_unlock(&radio->disconnect_lock); 1218 mutex_unlock(&radio->disconnect_lock);
1218
1219done: 1219done:
1220 return retval; 1220 return retval;
1221} 1221}
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 061e147f6f26..84b6fc15519d 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -312,6 +312,14 @@ config VIDEO_OV7670
312 OV7670 VGA camera. It currently only works with the M88ALP01 312 OV7670 VGA camera. It currently only works with the M88ALP01
313 controller. 313 controller.
314 314
315config VIDEO_MT9V011
316 tristate "Micron mt9v011 sensor support"
317 depends on I2C && VIDEO_V4L2
318 ---help---
319 This is a Video4Linux2 sensor-level driver for the Micron
 320 mt9v011 1.3 Mpixel camera. It currently only works with the
321 em28xx driver.
322
315config VIDEO_TCM825X 323config VIDEO_TCM825X
316 tristate "TCM825x camera sensor support" 324 tristate "TCM825x camera sensor support"
317 depends on I2C && VIDEO_V4L2 325 depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 7fb3add1b387..9f2e3214a482 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
69obj-$(CONFIG_VIDEO_OV7670) += ov7670.o 69obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
70obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o 70obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
71obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o 71obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
72obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
72 73
73obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o 74obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
74obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o 75obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index fdb4adff3d28..ca6558c394be 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -3324,8 +3324,6 @@ void __devinit bttv_init_card1(struct bttv *btv)
3324/* initialization part two -- after registering i2c bus */ 3324/* initialization part two -- after registering i2c bus */
3325void __devinit bttv_init_card2(struct bttv *btv) 3325void __devinit bttv_init_card2(struct bttv *btv)
3326{ 3326{
3327 int addr=ADDR_UNSET;
3328
3329 btv->tuner_type = UNSET; 3327 btv->tuner_type = UNSET;
3330 3328
3331 if (BTTV_BOARD_UNKNOWN == btv->c.type) { 3329 if (BTTV_BOARD_UNKNOWN == btv->c.type) {
@@ -3470,9 +3468,6 @@ void __devinit bttv_init_card2(struct bttv *btv)
3470 btv->pll.pll_current = -1; 3468 btv->pll.pll_current = -1;
3471 3469
3472 /* tuner configuration (from card list / autodetect / insmod option) */ 3470 /* tuner configuration (from card list / autodetect / insmod option) */
3473 if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr)
3474 addr = bttv_tvcards[btv->c.type].tuner_addr;
3475
3476 if (UNSET != bttv_tvcards[btv->c.type].tuner_type) 3471 if (UNSET != bttv_tvcards[btv->c.type].tuner_type)
3477 if (UNSET == btv->tuner_type) 3472 if (UNSET == btv->tuner_type)
3478 btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type; 3473 btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type;
@@ -3496,40 +3491,6 @@ void __devinit bttv_init_card2(struct bttv *btv)
3496 if (UNSET == btv->tuner_type) 3491 if (UNSET == btv->tuner_type)
3497 btv->tuner_type = TUNER_ABSENT; 3492 btv->tuner_type = TUNER_ABSENT;
3498 3493
3499 if (btv->tuner_type != TUNER_ABSENT) {
3500 struct tuner_setup tun_setup;
3501
3502 /* Load tuner module before issuing tuner config call! */
3503 if (bttv_tvcards[btv->c.type].has_radio)
3504 v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev,
3505 &btv->c.i2c_adap, "tuner", "tuner",
3506 v4l2_i2c_tuner_addrs(ADDRS_RADIO));
3507 v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev,
3508 &btv->c.i2c_adap, "tuner", "tuner",
3509 v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
3510 v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev,
3511 &btv->c.i2c_adap, "tuner", "tuner",
3512 v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD));
3513
3514 tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
3515 tun_setup.type = btv->tuner_type;
3516 tun_setup.addr = addr;
3517
3518 if (bttv_tvcards[btv->c.type].has_radio)
3519 tun_setup.mode_mask |= T_RADIO;
3520
3521 bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
3522 }
3523
3524 if (btv->tda9887_conf) {
3525 struct v4l2_priv_tun_config tda9887_cfg;
3526
3527 tda9887_cfg.tuner = TUNER_TDA9887;
3528 tda9887_cfg.priv = &btv->tda9887_conf;
3529
3530 bttv_call_all(btv, tuner, s_config, &tda9887_cfg);
3531 }
3532
3533 btv->dig = bttv_tvcards[btv->c.type].has_dig_in ? 3494 btv->dig = bttv_tvcards[btv->c.type].has_dig_in ?
3534 bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET; 3495 bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET;
3535 btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ? 3496 btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ?
@@ -3540,15 +3501,15 @@ void __devinit bttv_init_card2(struct bttv *btv)
3540 btv->has_remote = remote[btv->c.nr]; 3501 btv->has_remote = remote[btv->c.nr];
3541 3502
3542 if (bttv_tvcards[btv->c.type].has_radio) 3503 if (bttv_tvcards[btv->c.type].has_radio)
3543 btv->has_radio=1; 3504 btv->has_radio = 1;
3544 if (bttv_tvcards[btv->c.type].has_remote) 3505 if (bttv_tvcards[btv->c.type].has_remote)
3545 btv->has_remote=1; 3506 btv->has_remote = 1;
3546 if (!bttv_tvcards[btv->c.type].no_gpioirq) 3507 if (!bttv_tvcards[btv->c.type].no_gpioirq)
3547 btv->gpioirq=1; 3508 btv->gpioirq = 1;
3548 if (bttv_tvcards[btv->c.type].volume_gpio) 3509 if (bttv_tvcards[btv->c.type].volume_gpio)
3549 btv->volume_gpio=bttv_tvcards[btv->c.type].volume_gpio; 3510 btv->volume_gpio = bttv_tvcards[btv->c.type].volume_gpio;
3550 if (bttv_tvcards[btv->c.type].audio_mode_gpio) 3511 if (bttv_tvcards[btv->c.type].audio_mode_gpio)
3551 btv->audio_mode_gpio=bttv_tvcards[btv->c.type].audio_mode_gpio; 3512 btv->audio_mode_gpio = bttv_tvcards[btv->c.type].audio_mode_gpio;
3552 3513
3553 if (btv->tuner_type == TUNER_ABSENT) 3514 if (btv->tuner_type == TUNER_ABSENT)
3554 return; /* no tuner or related drivers to load */ 3515 return; /* no tuner or related drivers to load */
@@ -3666,6 +3627,49 @@ no_audio:
3666} 3627}
3667 3628
3668 3629
3630/* initialize the tuner */
3631void __devinit bttv_init_tuner(struct bttv *btv)
3632{
3633 int addr = ADDR_UNSET;
3634
3635 if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr)
3636 addr = bttv_tvcards[btv->c.type].tuner_addr;
3637
3638 if (btv->tuner_type != TUNER_ABSENT) {
3639 struct tuner_setup tun_setup;
3640
3641 /* Load tuner module before issuing tuner config call! */
3642 if (bttv_tvcards[btv->c.type].has_radio)
3643 v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev,
3644 &btv->c.i2c_adap, "tuner", "tuner",
3645 v4l2_i2c_tuner_addrs(ADDRS_RADIO));
3646 v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev,
3647 &btv->c.i2c_adap, "tuner", "tuner",
3648 v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
3649 v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev,
3650 &btv->c.i2c_adap, "tuner", "tuner",
3651 v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD));
3652
3653 tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
3654 tun_setup.type = btv->tuner_type;
3655 tun_setup.addr = addr;
3656
3657 if (bttv_tvcards[btv->c.type].has_radio)
3658 tun_setup.mode_mask |= T_RADIO;
3659
3660 bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
3661 }
3662
3663 if (btv->tda9887_conf) {
3664 struct v4l2_priv_tun_config tda9887_cfg;
3665
3666 tda9887_cfg.tuner = TUNER_TDA9887;
3667 tda9887_cfg.priv = &btv->tda9887_conf;
3668
3669 bttv_call_all(btv, tuner, s_config, &tda9887_cfg);
3670 }
3671}
3672
3669/* ----------------------------------------------------------------------- */ 3673/* ----------------------------------------------------------------------- */
3670 3674
3671static void modtec_eeprom(struct bttv *btv) 3675static void modtec_eeprom(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5eb1464af670..8cc6dd28d6a7 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -41,6 +41,7 @@
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/kernel.h> 42#include <linux/kernel.h>
43#include <linux/sched.h> 43#include <linux/sched.h>
44#include <linux/smp_lock.h>
44#include <linux/interrupt.h> 45#include <linux/interrupt.h>
45#include <linux/kdev_t.h> 46#include <linux/kdev_t.h>
46#include "bttvp.h" 47#include "bttvp.h"
@@ -4418,6 +4419,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
4418 4419
4419 /* some card-specific stuff (needs working i2c) */ 4420 /* some card-specific stuff (needs working i2c) */
4420 bttv_init_card2(btv); 4421 bttv_init_card2(btv);
4422 bttv_init_tuner(btv);
4421 init_irqreg(btv); 4423 init_irqreg(btv);
4422 4424
4423 /* register video4linux + input */ 4425 /* register video4linux + input */
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index 3d36daf206f3..3ec2402c6b4a 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -283,6 +283,7 @@ extern struct tvcard bttv_tvcards[];
283extern void bttv_idcard(struct bttv *btv); 283extern void bttv_idcard(struct bttv *btv);
284extern void bttv_init_card1(struct bttv *btv); 284extern void bttv_init_card1(struct bttv *btv);
285extern void bttv_init_card2(struct bttv *btv); 285extern void bttv_init_card2(struct bttv *btv);
286extern void bttv_init_tuner(struct bttv *btv);
286 287
287/* card-specific functions */ 288/* card-specific functions */
288extern void tea5757_set_freq(struct bttv *btv, unsigned short freq); 289extern void tea5757_set_freq(struct bttv *btv, unsigned short freq);
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index c92a25036f0e..36f2d76006fd 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -198,11 +198,14 @@ static const struct cx18_card_pci_info cx18_pci_mpc718[] = {
198 198
199static const struct cx18_card cx18_card_mpc718 = { 199static const struct cx18_card cx18_card_mpc718 = {
200 .type = CX18_CARD_YUAN_MPC718, 200 .type = CX18_CARD_YUAN_MPC718,
201 .name = "Yuan MPC718", 201 .name = "Yuan MPC718 MiniPCI DVB-T/Analog",
202 .comment = "Analog video capture works; some audio line in may not.\n", 202 .comment = "Experimenters needed for device to work well.\n"
203 "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
203 .v4l2_capabilities = CX18_CAP_ENCODER, 204 .v4l2_capabilities = CX18_CAP_ENCODER,
204 .hw_audio_ctrl = CX18_HW_418_AV, 205 .hw_audio_ctrl = CX18_HW_418_AV,
205 .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL, 206 .hw_muxer = CX18_HW_GPIO_MUX,
207 .hw_all = CX18_HW_418_AV | CX18_HW_TUNER |
208 CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
206 .video_inputs = { 209 .video_inputs = {
207 { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, 210 { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
208 { CX18_CARD_INPUT_SVIDEO1, 1, 211 { CX18_CARD_INPUT_SVIDEO1, 1,
@@ -211,27 +214,34 @@ static const struct cx18_card cx18_card_mpc718 = {
211 { CX18_CARD_INPUT_SVIDEO2, 2, 214 { CX18_CARD_INPUT_SVIDEO2, 2,
212 CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, 215 CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 },
213 { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, 216 { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 },
214 { CX18_CARD_INPUT_COMPOSITE3, 2, CX18_AV_COMPOSITE3 },
215 }, 217 },
216 .audio_inputs = { 218 .audio_inputs = {
217 { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, 219 { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
218 { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 0 }, 220 { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
219 { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, 0 }, 221 { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 },
220 }, 222 },
221 .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 0 },
222 .tuners = { 223 .tuners = {
223 /* XC3028 tuner */ 224 /* XC3028 tuner */
224 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, 225 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
225 }, 226 },
227 /* FIXME - the FM radio is just a guess and driver doesn't use SIF */
228 .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
226 .ddr = { 229 .ddr = {
227 /* Probably Samsung K4D263238G-VC33 memory */ 230 /* Hynix HY5DU283222B DDR RAM */
228 .chip_config = 0x003, 231 .chip_config = 0x303,
229 .refresh = 0x30c, 232 .refresh = 0x3bd,
230 .timing1 = 0x23230b73, 233 .timing1 = 0x36320966,
231 .timing2 = 0x08, 234 .timing2 = 0x1f,
232 .tune_lane = 0, 235 .tune_lane = 0,
233 .initial_emrs = 2, 236 .initial_emrs = 2,
234 }, 237 },
238 .gpio_init.initial_value = 0x1,
239 .gpio_init.direction = 0x3,
240 /* FIXME - these GPIO's are just guesses */
241 .gpio_audio_input = { .mask = 0x3,
242 .tuner = 0x1,
243 .linein = 0x3,
244 .radio = 0x1 },
235 .xceive_pin = 0, 245 .xceive_pin = 0,
236 .pci_list = cx18_pci_mpc718, 246 .pci_list = cx18_pci_mpc718,
237 .i2c = &cx18_i2c_std, 247 .i2c = &cx18_i2c_std,
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index 6ea3fe623ef4..51a0c33b25b7 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -30,6 +30,10 @@
30#include "s5h1409.h" 30#include "s5h1409.h"
31#include "mxl5005s.h" 31#include "mxl5005s.h"
32#include "zl10353.h" 32#include "zl10353.h"
33
34#include <linux/firmware.h>
35#include "mt352.h"
36#include "mt352_priv.h"
33#include "tuner-xc2028.h" 37#include "tuner-xc2028.h"
34 38
35DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 39DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -38,6 +42,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
38#define CX18_CLOCK_ENABLE2 0xc71024 42#define CX18_CLOCK_ENABLE2 0xc71024
39#define CX18_DMUX_CLK_MASK 0x0080 43#define CX18_DMUX_CLK_MASK 0x0080
40 44
45/*
46 * CX18_CARD_HVR_1600_ESMT
47 * CX18_CARD_HVR_1600_SAMSUNG
48 */
49
41static struct mxl5005s_config hauppauge_hvr1600_tuner = { 50static struct mxl5005s_config hauppauge_hvr1600_tuner = {
42 .i2c_address = 0xC6 >> 1, 51 .i2c_address = 0xC6 >> 1,
43 .if_freq = IF_FREQ_5380000HZ, 52 .if_freq = IF_FREQ_5380000HZ,
@@ -65,6 +74,9 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
65 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 74 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
66}; 75};
67 76
77/*
78 * CX18_CARD_LEADTEK_DVR3100H
79 */
68/* Information/confirmation of proper config values provided by Terry Wu */ 80/* Information/confirmation of proper config values provided by Terry Wu */
69static struct zl10353_config leadtek_dvr3100h_demod = { 81static struct zl10353_config leadtek_dvr3100h_demod = {
70 .demod_address = 0x1e >> 1, /* Datasheet suggested straps */ 82 .demod_address = 0x1e >> 1, /* Datasheet suggested straps */
@@ -74,6 +86,121 @@ static struct zl10353_config leadtek_dvr3100h_demod = {
74 .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */ 86 .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
75}; 87};
76 88
89/*
90 * CX18_CARD_YUAN_MPC718
91 */
92/*
93 * Due to
94 *
 95 * 1. an absence of information on how to program the MT352
 96 * 2. the Linux mt352 module pushing MT352 initialization off onto us here
97 *
98 * We have to use an init sequence that *you* must extract from the Windows
99 * driver (yuanrap.sys) and which we load as a firmware.
100 *
101 * If someone can provide me with a Zarlink MT352 (Intel CE6352?) Design Manual
102 * with chip programming details, then I can remove this annoyance.
103 */
104static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
105 const struct firmware **fw)
106{
107 struct cx18 *cx = stream->cx;
108 const char *fn = "dvb-cx18-mpc718-mt352.fw";
109 int ret;
110
111 ret = request_firmware(fw, fn, &cx->pci_dev->dev);
112 if (ret)
113 CX18_ERR("Unable to open firmware file %s\n", fn);
114 else {
115 size_t sz = (*fw)->size;
116 if (sz < 2 || sz > 64 || (sz % 2) != 0) {
117 CX18_ERR("Firmware %s has a bad size: %lu bytes\n",
118 fn, (unsigned long) sz);
119 ret = -EILSEQ;
120 release_firmware(*fw);
121 *fw = NULL;
122 }
123 }
124
125 if (ret) {
126 CX18_ERR("The MPC718 board variant with the MT352 DVB-T"
127 "demodualtor will not work without it\n");
128 CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware "
129 "mpc718' if you need the firmware\n");
130 }
131 return ret;
132}
133
134static int yuan_mpc718_mt352_init(struct dvb_frontend *fe)
135{
136 struct cx18_dvb *dvb = container_of(fe->dvb,
137 struct cx18_dvb, dvb_adapter);
138 struct cx18_stream *stream = container_of(dvb, struct cx18_stream, dvb);
139 const struct firmware *fw = NULL;
140 int ret;
141 int i;
142 u8 buf[3];
143
144 ret = yuan_mpc718_mt352_reqfw(stream, &fw);
145 if (ret)
146 return ret;
147
148 /* Loop through all the register-value pairs in the firmware file */
149 for (i = 0; i < fw->size; i += 2) {
150 buf[0] = fw->data[i];
151 /* Intercept a few registers we want to set ourselves */
152 switch (buf[0]) {
153 case TRL_NOMINAL_RATE_0:
154 /* Set our custom OFDM bandwidth in the case below */
155 break;
156 case TRL_NOMINAL_RATE_1:
157 /* 6 MHz: 64/7 * 6/8 / 20.48 * 2^16 = 0x55b6.db6 */
158 /* 7 MHz: 64/7 * 7/8 / 20.48 * 2^16 = 0x6400 */
159 /* 8 MHz: 64/7 * 8/8 / 20.48 * 2^16 = 0x7249.249 */
160 buf[1] = 0x72;
161 buf[2] = 0x49;
162 mt352_write(fe, buf, 3);
163 break;
164 case INPUT_FREQ_0:
165 /* Set our custom IF in the case below */
166 break;
167 case INPUT_FREQ_1:
168 /* 4.56 MHz IF: (20.48 - 4.56)/20.48 * 2^14 = 0x31c0 */
169 buf[1] = 0x31;
170 buf[2] = 0xc0;
171 mt352_write(fe, buf, 3);
172 break;
173 default:
174 /* Pass through the register-value pair from the fw */
175 buf[1] = fw->data[i+1];
176 mt352_write(fe, buf, 2);
177 break;
178 }
179 }
180
181 buf[0] = (u8) TUNER_GO;
182 buf[1] = 0x01; /* Go */
183 mt352_write(fe, buf, 2);
184 release_firmware(fw);
185 return 0;
186}
187
188static struct mt352_config yuan_mpc718_mt352_demod = {
189 .demod_address = 0x1e >> 1,
190 .adc_clock = 20480, /* 20.480 MHz */
191 .if2 = 4560, /* 4.560 MHz */
192 .no_tuner = 1, /* XC3028 is not behind the gate */
193 .demod_init = yuan_mpc718_mt352_init,
194};
195
196static struct zl10353_config yuan_mpc718_zl10353_demod = {
197 .demod_address = 0x1e >> 1, /* Datasheet suggested straps */
198 .if2 = 45600, /* 4.560 MHz IF from the XC3028 */
199 .parallel_ts = 1, /* Not a serial TS */
200 .no_tuner = 1, /* XC3028 is not behind the gate */
201 .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
202};
203
77static int dvb_register(struct cx18_stream *stream); 204static int dvb_register(struct cx18_stream *stream);
78 205
79/* Kernel DVB framework calls this when the feed needs to start. 206/* Kernel DVB framework calls this when the feed needs to start.
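The register values intercepted inside yuan_mpc718_mt352_init() come from the formulas quoted in its comments: the TRL nominal rate is 64/7 * (bandwidth/8) / 20.48 * 2^16 and the input-frequency word is (20.48 - IF) / 20.48 * 2^14, with the ADC clock and IF in MHz. A standalone check of the two constants used for the MPC718 (mirrors the comment values; not kernel code):

#include <stdio.h>

int main(void)
{
        double adc_mhz = 20.48;   /* MT352 ADC clock */
        double bw_mhz  = 8.0;     /* DVB-T channel bandwidth */
        double if_mhz  = 4.56;    /* IF delivered by the XC3028 */

        unsigned int trl = (unsigned int)
                ((64.0 / 7.0) * (bw_mhz / 8.0) / adc_mhz * 65536.0);
        unsigned int inp = (unsigned int)
                ((adc_mhz - if_mhz) / adc_mhz * 16384.0);

        printf("TRL_NOMINAL_RATE = 0x%04x\n", trl);   /* expect 0x7249 */
        printf("INPUT_FREQ       = 0x%04x\n", inp);   /* expect 0x31c0 */
        return 0;
}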
@@ -113,6 +240,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
113 break; 240 break;
114 241
115 case CX18_CARD_LEADTEK_DVR3100H: 242 case CX18_CARD_LEADTEK_DVR3100H:
243 case CX18_CARD_YUAN_MPC718:
116 default: 244 default:
117 /* Assumption - Parallel transport - Signalling 245 /* Assumption - Parallel transport - Signalling
118 * undefined or default. 246 * undefined or default.
@@ -326,6 +454,38 @@ static int dvb_register(struct cx18_stream *stream)
326 fe->ops.tuner_ops.set_config(fe, &ctrl); 454 fe->ops.tuner_ops.set_config(fe, &ctrl);
327 } 455 }
328 break; 456 break;
457 case CX18_CARD_YUAN_MPC718:
458 /*
459 * TODO
460 * Apparently, these cards also could instead have a
461 * DiBcom demod supported by one of the db7000 drivers
462 */
463 dvb->fe = dvb_attach(mt352_attach,
464 &yuan_mpc718_mt352_demod,
465 &cx->i2c_adap[1]);
466 if (dvb->fe == NULL)
467 dvb->fe = dvb_attach(zl10353_attach,
468 &yuan_mpc718_zl10353_demod,
469 &cx->i2c_adap[1]);
470 if (dvb->fe != NULL) {
471 struct dvb_frontend *fe;
472 struct xc2028_config cfg = {
473 .i2c_adap = &cx->i2c_adap[1],
474 .i2c_addr = 0xc2 >> 1,
475 .ctrl = NULL,
476 };
477 static struct xc2028_ctrl ctrl = {
478 .fname = XC2028_DEFAULT_FIRMWARE,
479 .max_len = 64,
480 .demod = XC3028_FE_ZARLINK456,
481 .type = XC2028_AUTO,
482 };
483
484 fe = dvb_attach(xc2028_attach, dvb->fe, &cfg);
485 if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
486 fe->ops.tuner_ops.set_config(fe, &ctrl);
487 }
488 break;
329 default: 489 default:
330 /* No Digital Tv Support */ 490 /* No Digital Tv Support */
331 break; 491 break;
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 2943bfd32a94..e0cf21e0b1bf 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -31,6 +31,7 @@
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/firmware.h> 33#include <linux/firmware.h>
34#include <linux/smp_lock.h>
34#include <media/v4l2-common.h> 35#include <media/v4l2-common.h>
35#include <media/v4l2-ioctl.h> 36#include <media/v4l2-ioctl.h>
36#include <media/cx2341x.h> 37#include <media/cx2341x.h>
@@ -57,7 +58,8 @@ MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
57 58
58#define dprintk(level, fmt, arg...)\ 59#define dprintk(level, fmt, arg...)\
59 do { if (v4l_debug >= level) \ 60 do { if (v4l_debug >= level) \
60 printk(KERN_DEBUG "%s: " fmt, dev->name , ## arg);\ 61 printk(KERN_DEBUG "%s: " fmt, \
62 (dev) ? dev->name : "cx23885[?]", ## arg); \
61 } while (0) 63 } while (0)
62 64
63static struct cx23885_tvnorm cx23885_tvnorms[] = { 65static struct cx23885_tvnorm cx23885_tvnorms[] = {
@@ -1676,6 +1678,7 @@ static struct v4l2_file_operations mpeg_fops = {
1676 .read = mpeg_read, 1678 .read = mpeg_read,
1677 .poll = mpeg_poll, 1679 .poll = mpeg_poll,
1678 .mmap = mpeg_mmap, 1680 .mmap = mpeg_mmap,
1681 .ioctl = video_ioctl2,
1679}; 1682};
1680 1683
1681static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { 1684static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 48a975134ac5..86ac529e62be 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -463,6 +463,30 @@ static struct xc5000_config mygica_x8506_xc5000_config = {
463 .if_khz = 5380, 463 .if_khz = 5380,
464}; 464};
465 465
466static int cx23885_dvb_set_frontend(struct dvb_frontend *fe,
467 struct dvb_frontend_parameters *param)
468{
469 struct cx23885_tsport *port = fe->dvb->priv;
470 struct cx23885_dev *dev = port->dev;
471
472 switch (dev->board) {
473 case CX23885_BOARD_HAUPPAUGE_HVR1275:
474 switch (param->u.vsb.modulation) {
475 case VSB_8:
476 cx23885_gpio_clear(dev, GPIO_5);
477 break;
478 case QAM_64:
479 case QAM_256:
480 default:
481 cx23885_gpio_set(dev, GPIO_5);
482 break;
483 }
484 break;
485 }
486 return (port->set_frontend_save) ?
487 port->set_frontend_save(fe, param) : -ENODEV;
488}
489
466static int dvb_register(struct cx23885_tsport *port) 490static int dvb_register(struct cx23885_tsport *port)
467{ 491{
468 struct cx23885_dev *dev = port->dev; 492 struct cx23885_dev *dev = port->dev;
@@ -502,6 +526,12 @@ static int dvb_register(struct cx23885_tsport *port)
502 0x60, &dev->i2c_bus[1].i2c_adap, 526 0x60, &dev->i2c_bus[1].i2c_adap,
503 &hauppauge_hvr127x_config); 527 &hauppauge_hvr127x_config);
504 } 528 }
529
530 /* FIXME: temporary hack */
531 /* define bridge override to set_frontend */
532 port->set_frontend_save = fe0->dvb.frontend->ops.set_frontend;
533 fe0->dvb.frontend->ops.set_frontend = cx23885_dvb_set_frontend;
534
505 break; 535 break;
506 case CX23885_BOARD_HAUPPAUGE_HVR1255: 536 case CX23885_BOARD_HAUPPAUGE_HVR1255:
507 i2c_bus = &dev->i2c_bus[0]; 537 i2c_bus = &dev->i2c_bus[0];
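The "temporary hack" above is the usual ops-wrapping pattern: the demod driver's set_frontend pointer is saved in the tsport, replaced with a bridge-level wrapper that flips a GPIO according to the requested modulation, and the wrapper then chains to the saved pointer. A generic sketch of the idea (types and names are illustrative, not the cx23885 API):

#include <stdio.h>

struct params { int modulation; };

struct ops {
        int (*set_frontend)(struct params *p);
};

/* Saved copy of the demod driver's original hook. */
static int (*set_frontend_save)(struct params *p);

static int demod_set_frontend(struct params *p)
{
        printf("demod: modulation %d\n", p->modulation);
        return 0;
}

/* Bridge wrapper: do the board-specific work, then call the original. */
static int bridge_set_frontend(struct params *p)
{
        if (p->modulation == 8)
                printf("bridge: clear GPIO\n");   /* e.g. the 8VSB path */
        else
                printf("bridge: set GPIO\n");     /* e.g. the QAM paths */
        return set_frontend_save ? set_frontend_save(p) : -1;
}

int main(void)
{
        struct ops fe_ops = { .set_frontend = demod_set_frontend };
        struct params p = { .modulation = 8 };

        set_frontend_save = fe_ops.set_frontend;  /* save, then override */
        fe_ops.set_frontend = bridge_set_frontend;

        fe_ops.set_frontend(&p);
        return 0;
}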
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 70836af3ab48..5d6093336300 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -26,6 +26,7 @@
26#include <linux/kmod.h> 26#include <linux/kmod.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/smp_lock.h>
29#include <linux/interrupt.h> 30#include <linux/interrupt.h>
30#include <linux/delay.h> 31#include <linux/delay.h>
31#include <linux/kthread.h> 32#include <linux/kthread.h>
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 1a2ac518a3f1..214a55e943b7 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -288,6 +288,10 @@ struct cx23885_tsport {
288 /* Allow a single tsport to have multiple frontends */ 288 /* Allow a single tsport to have multiple frontends */
289 u32 num_frontends; 289 u32 num_frontends;
290 void *port_priv; 290 void *port_priv;
291
292 /* FIXME: temporary hack */
293 int (*set_frontend_save) (struct dvb_frontend *,
294 struct dvb_frontend_parameters *);
291}; 295};
292 296
293struct cx23885_dev { 297struct cx23885_dev {
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 44eacfb0d0d6..356d6896da3f 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/device.h> 33#include <linux/device.h>
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/smp_lock.h>
35#include <media/v4l2-common.h> 36#include <media/v4l2-common.h>
36#include <media/v4l2-ioctl.h> 37#include <media/v4l2-ioctl.h>
37#include <media/cx2341x.h> 38#include <media/cx2341x.h>
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index b12770848c00..2bb54c3ef5cd 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -31,6 +31,7 @@
31#include <linux/kmod.h> 31#include <linux/kmod.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/smp_lock.h>
34#include <linux/interrupt.h> 35#include <linux/interrupt.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36#include <linux/delay.h> 37#include <linux/delay.h>
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index ec2f45dde164..0664d111085f 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -32,6 +32,7 @@
32#include <linux/list.h> 32#include <linux/list.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/smp_lock.h>
35#include <linux/init.h> 36#include <linux/init.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/atomic.h> 38#include <asm/atomic.h>
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 16a5af30e9d1..6524b493e033 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -8,6 +8,8 @@ config VIDEO_EM28XX
8 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO 8 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
9 select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO 9 select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
10 select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO 10 select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
11 select VIDEO_MT9V011 if VIDEO_HELPER_CHIPS_AUTO
12
11 ---help--- 13 ---help---
12 This is a video4linux driver for Empia 28xx based TV cards. 14 This is a video4linux driver for Empia 28xx based TV cards.
13 15
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index c43fdb9bc888..320f1f60276e 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -157,6 +157,20 @@ static struct em28xx_reg_seq evga_indtube_digital[] = {
157 { -1, -1, -1, -1}, 157 { -1, -1, -1, -1},
158}; 158};
159 159
160/* Pinnacle Hybrid Pro eb1a:2881 */
161static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = {
162 {EM28XX_R08_GPIO, 0xfd, ~EM_GPIO_4, 10},
163 { -1, -1, -1, -1},
164};
165
166static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = {
167 {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10},
168 {EM2880_R04_GPO, 0x04, 0xff, 100},/* zl10353 reset */
169 {EM2880_R04_GPO, 0x0c, 0xff, 1},
170 { -1, -1, -1, -1},
171};
172
173
160/* Callback for the most boards */ 174/* Callback for the most boards */
161static struct em28xx_reg_seq default_tuner_gpio[] = { 175static struct em28xx_reg_seq default_tuner_gpio[] = {
162 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, 176 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
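The em28xx_reg_seq arrays added above are sentinel-terminated tables of {register, value, mask, sleep} steps; the driver walks each table until it hits the {-1, -1, -1, -1} entry. A standalone walker over the same table shape (the register write and sleep are stubbed with a printf here):

#include <stdio.h>

struct reg_seq {
        int reg;        /* -1 terminates the sequence */
        int val;
        int mask;       /* bits to touch in the register */
        int sleep_ms;
};

static const struct reg_seq demo_seq[] = {
        { 0x08, 0xfd, 0xff, 10 },
        { 0x04, 0x0c, 0xff,  1 },
        { -1, -1, -1, -1 },          /* sentinel */
};

/* A real driver would do a masked I2C/USB register write and then msleep(). */
static void apply_reg_seq(const struct reg_seq *seq)
{
        for (; seq->reg != -1; seq++)
                printf("reg 0x%02x <- 0x%02x (mask 0x%02x), sleep %d ms\n",
                       seq->reg, seq->val, seq->mask, seq->sleep_ms);
}

int main(void)
{
        apply_reg_seq(demo_seq);
        return 0;
}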
@@ -191,18 +205,27 @@ static struct em28xx_reg_seq terratec_av350_unmute_gpio[] = {
191 {EM28XX_R08_GPIO, 0xff, 0xff, 10}, 205 {EM28XX_R08_GPIO, 0xff, 0xff, 10},
192 { -1, -1, -1, -1}, 206 { -1, -1, -1, -1},
193}; 207};
208
209static struct em28xx_reg_seq silvercrest_reg_seq[] = {
210 {EM28XX_R08_GPIO, 0xff, 0xff, 10},
211 {EM28XX_R08_GPIO, 0x01, 0xf7, 10},
212 { -1, -1, -1, -1},
213};
214
194/* 215/*
195 * Board definitions 216 * Board definitions
196 */ 217 */
197struct em28xx_board em28xx_boards[] = { 218struct em28xx_board em28xx_boards[] = {
198 [EM2750_BOARD_UNKNOWN] = { 219 [EM2750_BOARD_UNKNOWN] = {
199 .name = "Unknown EM2750/EM2751 webcam grabber", 220 .name = "EM2710/EM2750/EM2751 webcam grabber",
200 .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, 221 .xclk = EM28XX_XCLK_FREQUENCY_48MHZ,
201 .tuner_type = TUNER_ABSENT, /* This is a webcam */ 222 .tuner_type = TUNER_ABSENT,
223 .is_webcam = 1,
202 .input = { { 224 .input = { {
203 .type = EM28XX_VMUX_COMPOSITE1, 225 .type = EM28XX_VMUX_COMPOSITE1,
204 .vmux = 0, 226 .vmux = 0,
205 .amux = EM28XX_AMUX_VIDEO, 227 .amux = EM28XX_AMUX_VIDEO,
228 .gpio = silvercrest_reg_seq,
206 } }, 229 } },
207 }, 230 },
208 [EM2800_BOARD_UNKNOWN] = { 231 [EM2800_BOARD_UNKNOWN] = {
@@ -224,13 +247,15 @@ struct em28xx_board em28xx_boards[] = {
224 [EM2820_BOARD_UNKNOWN] = { 247 [EM2820_BOARD_UNKNOWN] = {
225 .name = "Unknown EM2750/28xx video grabber", 248 .name = "Unknown EM2750/28xx video grabber",
226 .tuner_type = TUNER_ABSENT, 249 .tuner_type = TUNER_ABSENT,
250 .is_webcam = 1, /* To enable sensor probe */
227 }, 251 },
228 [EM2750_BOARD_DLCW_130] = { 252 [EM2750_BOARD_DLCW_130] = {
229 /* Beijing Huaqi Information Digital Technology Co., Ltd */ 253 /* Beijing Huaqi Information Digital Technology Co., Ltd */
230 .name = "Huaqi DLCW-130", 254 .name = "Huaqi DLCW-130",
231 .valid = EM28XX_BOARD_NOT_VALIDATED, 255 .valid = EM28XX_BOARD_NOT_VALIDATED,
232 .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, 256 .xclk = EM28XX_XCLK_FREQUENCY_48MHZ,
233 .tuner_type = TUNER_ABSENT, /* This is a webcam */ 257 .tuner_type = TUNER_ABSENT,
258 .is_webcam = 1,
234 .input = { { 259 .input = { {
235 .type = EM28XX_VMUX_COMPOSITE1, 260 .type = EM28XX_VMUX_COMPOSITE1,
236 .vmux = 0, 261 .vmux = 0,
@@ -431,13 +456,25 @@ struct em28xx_board em28xx_boards[] = {
431 [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { 456 [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = {
432 .name = "Videology 20K14XUSB USB2.0", 457 .name = "Videology 20K14XUSB USB2.0",
433 .valid = EM28XX_BOARD_NOT_VALIDATED, 458 .valid = EM28XX_BOARD_NOT_VALIDATED,
434 .tuner_type = TUNER_ABSENT, /* This is a webcam */ 459 .tuner_type = TUNER_ABSENT,
460 .is_webcam = 1,
435 .input = { { 461 .input = { {
436 .type = EM28XX_VMUX_COMPOSITE1, 462 .type = EM28XX_VMUX_COMPOSITE1,
437 .vmux = 0, 463 .vmux = 0,
438 .amux = EM28XX_AMUX_VIDEO, 464 .amux = EM28XX_AMUX_VIDEO,
439 } }, 465 } },
440 }, 466 },
467 [EM2820_BOARD_SILVERCREST_WEBCAM] = {
468 .name = "Silvercrest Webcam 1.3mpix",
469 .tuner_type = TUNER_ABSENT,
470 .is_webcam = 1,
471 .input = { {
472 .type = EM28XX_VMUX_COMPOSITE1,
473 .vmux = 0,
474 .amux = EM28XX_AMUX_VIDEO,
475 .gpio = silvercrest_reg_seq,
476 } },
477 },
441 [EM2821_BOARD_SUPERCOMP_USB_2] = { 478 [EM2821_BOARD_SUPERCOMP_USB_2] = {
442 .name = "Supercomp USB 2.0 TV", 479 .name = "Supercomp USB 2.0 TV",
443 .valid = EM28XX_BOARD_NOT_VALIDATED, 480 .valid = EM28XX_BOARD_NOT_VALIDATED,
@@ -479,7 +516,8 @@ struct em28xx_board em28xx_boards[] = {
479 /* Beijing Huaqi Information Digital Technology Co., Ltd */ 516 /* Beijing Huaqi Information Digital Technology Co., Ltd */
480 .name = "NetGMBH Cam", 517 .name = "NetGMBH Cam",
481 .valid = EM28XX_BOARD_NOT_VALIDATED, 518 .valid = EM28XX_BOARD_NOT_VALIDATED,
482 .tuner_type = TUNER_ABSENT, /* This is a webcam */ 519 .tuner_type = TUNER_ABSENT,
520 .is_webcam = 1,
483 .input = { { 521 .input = { {
484 .type = EM28XX_VMUX_COMPOSITE1, 522 .type = EM28XX_VMUX_COMPOSITE1,
485 .vmux = 0, 523 .vmux = 0,
@@ -826,7 +864,7 @@ struct em28xx_board em28xx_boards[] = {
826 .tuner_gpio = default_tuner_gpio, 864 .tuner_gpio = default_tuner_gpio,
827 .decoder = EM28XX_TVP5150, 865 .decoder = EM28XX_TVP5150,
828 .has_dvb = 1, 866 .has_dvb = 1,
829 .dvb_gpio = default_analog, 867 .dvb_gpio = default_digital,
830 .input = { { 868 .input = { {
831 .type = EM28XX_VMUX_TELEVISION, 869 .type = EM28XX_VMUX_TELEVISION,
832 .vmux = TVP5150_COMPOSITE0, 870 .vmux = TVP5150_COMPOSITE0,
@@ -1229,25 +1267,26 @@ struct em28xx_board em28xx_boards[] = {
1229 }, 1267 },
1230 [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { 1268 [EM2881_BOARD_PINNACLE_HYBRID_PRO] = {
1231 .name = "Pinnacle Hybrid Pro", 1269 .name = "Pinnacle Hybrid Pro",
1232 .valid = EM28XX_BOARD_NOT_VALIDATED,
1233 .tuner_type = TUNER_XC2028, 1270 .tuner_type = TUNER_XC2028,
1234 .tuner_gpio = default_tuner_gpio, 1271 .tuner_gpio = default_tuner_gpio,
1235 .decoder = EM28XX_TVP5150, 1272 .decoder = EM28XX_TVP5150,
1273 .has_dvb = 1,
1274 .dvb_gpio = pinnacle_hybrid_pro_digital,
1236 .input = { { 1275 .input = { {
1237 .type = EM28XX_VMUX_TELEVISION, 1276 .type = EM28XX_VMUX_TELEVISION,
1238 .vmux = TVP5150_COMPOSITE0, 1277 .vmux = TVP5150_COMPOSITE0,
1239 .amux = EM28XX_AMUX_VIDEO, 1278 .amux = EM28XX_AMUX_VIDEO,
1240 .gpio = default_analog, 1279 .gpio = pinnacle_hybrid_pro_analog,
1241 }, { 1280 }, {
1242 .type = EM28XX_VMUX_COMPOSITE1, 1281 .type = EM28XX_VMUX_COMPOSITE1,
1243 .vmux = TVP5150_COMPOSITE1, 1282 .vmux = TVP5150_COMPOSITE1,
1244 .amux = EM28XX_AMUX_LINE_IN, 1283 .amux = EM28XX_AMUX_LINE_IN,
1245 .gpio = default_analog, 1284 .gpio = pinnacle_hybrid_pro_analog,
1246 }, { 1285 }, {
1247 .type = EM28XX_VMUX_SVIDEO, 1286 .type = EM28XX_VMUX_SVIDEO,
1248 .vmux = TVP5150_SVIDEO, 1287 .vmux = TVP5150_SVIDEO,
1249 .amux = EM28XX_AMUX_LINE_IN, 1288 .amux = EM28XX_AMUX_LINE_IN,
1250 .gpio = default_analog, 1289 .gpio = pinnacle_hybrid_pro_analog,
1251 } }, 1290 } },
1252 }, 1291 },
1253 [EM2882_BOARD_PINNACLE_HYBRID_PRO] = { 1292 [EM2882_BOARD_PINNACLE_HYBRID_PRO] = {
@@ -1617,6 +1656,7 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = {
1617 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, 1656 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
1618 {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, 1657 {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028},
1619 {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, 1658 {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028},
1659 {0xb8846b20, EM2881_BOARD_PINNACLE_HYBRID_PRO, TUNER_XC2028},
1620}; 1660};
1621 1661
1622/* I2C devicelist hash table for devices with generic USB IDs */ 1662/* I2C devicelist hash table for devices with generic USB IDs */
@@ -1639,6 +1679,11 @@ static unsigned short tvp5150_addrs[] = {
1639 I2C_CLIENT_END 1679 I2C_CLIENT_END
1640}; 1680};
1641 1681
1682static unsigned short mt9v011_addrs[] = {
1683 0xba >> 1,
1684 I2C_CLIENT_END
1685};
1686
1642static unsigned short msp3400_addrs[] = { 1687static unsigned short msp3400_addrs[] = {
1643 0x80 >> 1, 1688 0x80 >> 1,
1644 0x88 >> 1, 1689 0x88 >> 1,
@@ -1678,6 +1723,91 @@ static inline void em28xx_set_model(struct em28xx *dev)
1678 EM28XX_I2C_FREQ_100_KHZ; 1723 EM28XX_I2C_FREQ_100_KHZ;
1679} 1724}
1680 1725
1726/* FIXME: Should be replaced by a proper mt9m001 driver */
1727static int em28xx_initialize_mt9m001(struct em28xx *dev)
1728{
1729 int i;
1730 unsigned char regs[][3] = {
1731 { 0x0d, 0x00, 0x01, },
1732 { 0x0d, 0x00, 0x00, },
1733 { 0x04, 0x05, 0x00, }, /* hres = 1280 */
1734 { 0x03, 0x04, 0x00, }, /* vres = 1024 */
1735 { 0x20, 0x11, 0x00, },
1736 { 0x06, 0x00, 0x10, },
1737 { 0x2b, 0x00, 0x24, },
1738 { 0x2e, 0x00, 0x24, },
1739 { 0x35, 0x00, 0x24, },
1740 { 0x2d, 0x00, 0x20, },
1741 { 0x2c, 0x00, 0x20, },
1742 { 0x09, 0x0a, 0xd4, },
1743 { 0x35, 0x00, 0x57, },
1744 };
1745
1746 for (i = 0; i < ARRAY_SIZE(regs); i++)
1747 i2c_master_send(&dev->i2c_client, &regs[i][0], 3);
1748
1749 return 0;
1750}
1751
1752/* HINT method: webcam I2C chips
1753 *
 1754 * This method works for webcams with Micron sensors
1755 */
1756static int em28xx_hint_sensor(struct em28xx *dev)
1757{
1758 int rc;
1759 char *sensor_name;
1760 unsigned char cmd;
1761 __be16 version_be;
1762 u16 version;
1763
1764 dev->i2c_client.addr = 0xba >> 1;
1765 cmd = 0;
1766 i2c_master_send(&dev->i2c_client, &cmd, 1);
1767 rc = i2c_master_recv(&dev->i2c_client, (char *)&version_be, 2);
1768 if (rc != 2)
1769 return -EINVAL;
1770
1771 version = be16_to_cpu(version_be);
1772
1773 switch (version) {
1774 case 0x8243: /* mt9v011 640x480 1.3 Mpix sensor */
1775 dev->model = EM2820_BOARD_SILVERCREST_WEBCAM;
1776 sensor_name = "mt9v011";
1777 dev->em28xx_sensor = EM28XX_MT9V011;
1778 dev->sensor_xres = 640;
1779 dev->sensor_yres = 480;
1780 dev->sensor_xtal = 6300000;
1781
1782 /* probably means GRGB 16 bit bayer */
1783 dev->vinmode = 0x0d;
1784 dev->vinctl = 0x00;
1785
1786 break;
1787 case 0x8431:
1788 dev->model = EM2750_BOARD_UNKNOWN;
1789 sensor_name = "mt9m001";
1790 dev->em28xx_sensor = EM28XX_MT9M001;
1791 em28xx_initialize_mt9m001(dev);
1792 dev->sensor_xres = 1280;
1793 dev->sensor_yres = 1024;
1794
1795 /* probably means BGGR 16 bit bayer */
1796 dev->vinmode = 0x0c;
1797 dev->vinctl = 0x00;
1798
1799 break;
1800 default:
 1801		printk("Unknown Micron Sensor 0x%04x\n", version);
1802 return -EINVAL;
1803 }
1804
1805 em28xx_errdev("Sensor is %s, using model %s entry.\n",
1806 sensor_name, em28xx_boards[dev->model].name);
1807
1808 return 0;
1809}
1810
1681/* Since em28xx_pre_card_setup() requires a proper dev->model, 1811/* Since em28xx_pre_card_setup() requires a proper dev->model,
1682 * this won't work for boards with generic PCI IDs 1812 * this won't work for boards with generic PCI IDs
1683 */ 1813 */
@@ -1706,7 +1836,7 @@ void em28xx_pre_card_setup(struct em28xx *dev)
1706 em28xx_info("chip ID is em2750\n"); 1836 em28xx_info("chip ID is em2750\n");
1707 break; 1837 break;
1708 case CHIP_ID_EM2820: 1838 case CHIP_ID_EM2820:
1709 em28xx_info("chip ID is em2820\n"); 1839 em28xx_info("chip ID is em2710 or em2820\n");
1710 break; 1840 break;
1711 case CHIP_ID_EM2840: 1841 case CHIP_ID_EM2840:
1712 em28xx_info("chip ID is em2840\n"); 1842 em28xx_info("chip ID is em2840\n");
@@ -1860,6 +1990,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
1860 ctl->demod = XC3028_FE_ZARLINK456; 1990 ctl->demod = XC3028_FE_ZARLINK456;
1861 break; 1991 break;
1862 case EM2880_BOARD_TERRATEC_HYBRID_XS: 1992 case EM2880_BOARD_TERRATEC_HYBRID_XS:
1993 case EM2881_BOARD_PINNACLE_HYBRID_PRO:
1863 ctl->demod = XC3028_FE_ZARLINK456; 1994 ctl->demod = XC3028_FE_ZARLINK456;
1864 break; 1995 break;
1865 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: 1996 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
@@ -2156,8 +2287,13 @@ void em28xx_card_setup(struct em28xx *dev)
2156 em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, 2287 em28xx_set_mode() in em28xx_pre_card_setup() was a no-op,
2157 so make the call now so the analog GPIOs are set properly 2288 so make the call now so the analog GPIOs are set properly
2158 before probing the i2c bus. */ 2289 before probing the i2c bus. */
2290 em28xx_gpio_set(dev, dev->board.tuner_gpio);
2159 em28xx_set_mode(dev, EM28XX_ANALOG_MODE); 2291 em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
2160 break; 2292 break;
2293 case EM2820_BOARD_SILVERCREST_WEBCAM:
 2294		/* FIXME: need to document the registers below */
2295 em28xx_write_reg(dev, 0x0d, 0x42);
2296 em28xx_write_reg(dev, 0x13, 0x08);
2161 } 2297 }
2162 2298
2163 if (dev->board.has_snapshot_button) 2299 if (dev->board.has_snapshot_button)
@@ -2189,6 +2325,15 @@ void em28xx_card_setup(struct em28xx *dev)
2189 v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, 2325 v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap,
2190 "tvp5150", "tvp5150", tvp5150_addrs); 2326 "tvp5150", "tvp5150", tvp5150_addrs);
2191 2327
2328 if (dev->em28xx_sensor == EM28XX_MT9V011) {
2329 struct v4l2_subdev *sd;
2330
2331 sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev,
2332 &dev->i2c_adap, "mt9v011", "mt9v011", mt9v011_addrs);
2333 v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
2334 }
2335
2336
2192 if (dev->board.adecoder == EM28XX_TVAUDIO) 2337 if (dev->board.adecoder == EM28XX_TVAUDIO)
2193 v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, 2338 v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
2194 "tvaudio", "tvaudio", dev->board.tvaudio_addr); 2339 "tvaudio", "tvaudio", dev->board.tvaudio_addr);
@@ -2333,6 +2478,20 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
2333 return errCode; 2478 return errCode;
2334 } 2479 }
2335 2480
2481 /*
2482 * Default format, used for tvp5150 or saa711x output formats
2483 */
2484 dev->vinmode = 0x10;
2485 dev->vinctl = 0x11;
2486
2487 /*
 2488	 * If the device can be a webcam, probe for a sensor.
 2489	 * If no sensor is found, it isn't a webcam.
2490 */
2491 if (dev->board.is_webcam)
2492 if (em28xx_hint_sensor(dev) < 0)
2493 dev->board.is_webcam = 0;
2494
2336 /* Do board specific init and eeprom reading */ 2495 /* Do board specific init and eeprom reading */
2337 em28xx_card_setup(dev); 2496 em28xx_card_setup(dev);
2338 2497
@@ -2573,6 +2732,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
2573 retval = em28xx_init_dev(&dev, udev, interface, nr); 2732 retval = em28xx_init_dev(&dev, udev, interface, nr);
2574 if (retval) { 2733 if (retval) {
2575 em28xx_devused &= ~(1<<dev->devno); 2734 em28xx_devused &= ~(1<<dev->devno);
2735 mutex_unlock(&dev->lock);
2576 kfree(dev); 2736 kfree(dev);
2577 goto err; 2737 goto err;
2578 } 2738 }
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index c8d7ce8fbd36..5b78e199abd1 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -650,15 +650,15 @@ int em28xx_set_outfmt(struct em28xx *dev)
650 int ret; 650 int ret;
651 651
652 ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT, 652 ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT,
653 dev->format->reg | 0x20, 0x3f); 653 dev->format->reg | 0x20, 0xff);
654 if (ret < 0) 654 if (ret < 0)
655 return ret; 655 return ret;
656 656
657 ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, 0x10); 657 ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode);
658 if (ret < 0) 658 if (ret < 0)
659 return ret; 659 return ret;
660 660
661 return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, 0x11); 661 return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, dev->vinctl);
662} 662}
663 663
664static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax, 664static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
@@ -695,13 +695,16 @@ static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
695{ 695{
696 u8 mode; 696 u8 mode;
697 /* the em2800 scaler only supports scaling down to 50% */ 697 /* the em2800 scaler only supports scaling down to 50% */
698 if (dev->board.is_em2800) 698
699 if (dev->board.is_em2800) {
699 mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00); 700 mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
700 else { 701 } else {
701 u8 buf[2]; 702 u8 buf[2];
703
702 buf[0] = h; 704 buf[0] = h;
703 buf[1] = h >> 8; 705 buf[1] = h >> 8;
704 em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2); 706 em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
707
705 buf[0] = v; 708 buf[0] = v;
706 buf[1] = v >> 8; 709 buf[1] = v >> 8;
707 em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2); 710 em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
@@ -720,8 +723,11 @@ int em28xx_resolution_set(struct em28xx *dev)
720 height = norm_maxh(dev) >> 1; 723 height = norm_maxh(dev) >> 1;
721 724
722 em28xx_set_outfmt(dev); 725 em28xx_set_outfmt(dev);
726
727
723 em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2); 728 em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
724 em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2); 729 em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
730
725 return em28xx_scaler_set(dev, dev->hscale, dev->vscale); 731 return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
726} 732}
727 733
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index e7b47c8da8f3..cf0ac7f2a30d 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -31,6 +31,8 @@
31#include "lgdt330x.h" 31#include "lgdt330x.h"
32#include "zl10353.h" 32#include "zl10353.h"
33#include "s5h1409.h" 33#include "s5h1409.h"
34#include "mt352.h"
35#include "mt352_priv.h" /* FIXME */
34 36
35MODULE_DESCRIPTION("driver for em28xx based DVB cards"); 37MODULE_DESCRIPTION("driver for em28xx based DVB cards");
36MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); 38MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
@@ -243,6 +245,14 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = {
243 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 245 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
244}; 246};
245 247
248static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = {
249 .demod_address = (0x1e >> 1),
250 .no_tuner = 1,
251 .disable_i2c_gate_ctrl = 1,
252 .parallel_ts = 1,
253 .if2 = 45600,
254};
255
246#ifdef EM28XX_DRX397XD_SUPPORT 256#ifdef EM28XX_DRX397XD_SUPPORT
247/* [TODO] djh - not sure yet what the device config needs to contain */ 257/* [TODO] djh - not sure yet what the device config needs to contain */
248static struct drx397xD_config em28xx_drx397xD_with_xc3028 = { 258static struct drx397xD_config em28xx_drx397xD_with_xc3028 = {
@@ -250,6 +260,41 @@ static struct drx397xD_config em28xx_drx397xD_with_xc3028 = {
250}; 260};
251#endif 261#endif
252 262
263static int mt352_terratec_xs_init(struct dvb_frontend *fe)
264{
265 /* Values extracted from a USB trace of the Terratec Windows driver */
266 static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x2c };
267 static u8 reset[] = { RESET, 0x80 };
268 static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
269 static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0xa0 };
270 static u8 input_freq_cfg[] = { INPUT_FREQ_1, 0x31, 0xb8 };
271 static u8 rs_err_cfg[] = { RS_ERR_PER_1, 0x00, 0x4d };
272 static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
273 static u8 trl_nom_cfg[] = { TRL_NOMINAL_RATE_1, 0x64, 0x00 };
274 static u8 tps_given_cfg[] = { TPS_GIVEN_1, 0x40, 0x80, 0x50 };
275 static u8 tuner_go[] = { TUNER_GO, 0x01};
276
277 mt352_write(fe, clock_config, sizeof(clock_config));
278 udelay(200);
279 mt352_write(fe, reset, sizeof(reset));
280 mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
281 mt352_write(fe, agc_cfg, sizeof(agc_cfg));
282 mt352_write(fe, input_freq_cfg, sizeof(input_freq_cfg));
283 mt352_write(fe, rs_err_cfg, sizeof(rs_err_cfg));
284 mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
285 mt352_write(fe, trl_nom_cfg, sizeof(trl_nom_cfg));
286 mt352_write(fe, tps_given_cfg, sizeof(tps_given_cfg));
287 mt352_write(fe, tuner_go, sizeof(tuner_go));
288 return 0;
289}
290
291static struct mt352_config terratec_xs_mt352_cfg = {
292 .demod_address = (0x1e >> 1),
293 .no_tuner = 1,
294 .if2 = 45600,
295 .demod_init = mt352_terratec_xs_init,
296};
297
253/* ------------------------------------------------------------------ */ 298/* ------------------------------------------------------------------ */
254 299
255static int attach_xc3028(u8 addr, struct em28xx *dev) 300static int attach_xc3028(u8 addr, struct em28xx *dev)
@@ -432,8 +477,6 @@ static int dvb_init(struct em28xx *dev)
432 goto out_free; 477 goto out_free;
433 } 478 }
434 break; 479 break;
435 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
436 case EM2880_BOARD_TERRATEC_HYBRID_XS:
437 case EM2880_BOARD_KWORLD_DVB_310U: 480 case EM2880_BOARD_KWORLD_DVB_310U:
438 case EM2880_BOARD_EMPIRE_DUAL_TV: 481 case EM2880_BOARD_EMPIRE_DUAL_TV:
439 dvb->frontend = dvb_attach(zl10353_attach, 482 dvb->frontend = dvb_attach(zl10353_attach,
@@ -444,6 +487,33 @@ static int dvb_init(struct em28xx *dev)
444 goto out_free; 487 goto out_free;
445 } 488 }
446 break; 489 break;
490 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
491 dvb->frontend = dvb_attach(zl10353_attach,
492 &em28xx_zl10353_xc3028_no_i2c_gate,
493 &dev->i2c_adap);
494 if (attach_xc3028(0x61, dev) < 0) {
495 result = -EINVAL;
496 goto out_free;
497 }
498 break;
499 case EM2880_BOARD_TERRATEC_HYBRID_XS:
500 case EM2881_BOARD_PINNACLE_HYBRID_PRO:
501 dvb->frontend = dvb_attach(zl10353_attach,
502 &em28xx_zl10353_xc3028_no_i2c_gate,
503 &dev->i2c_adap);
504 if (dvb->frontend == NULL) {
 505		/* This board could have either a zl10353 or an mt352.
 506		   If the chip ID isn't for zl10353, try mt352 */
507 dvb->frontend = dvb_attach(mt352_attach,
508 &terratec_xs_mt352_cfg,
509 &dev->i2c_adap);
510 }
511
512 if (attach_xc3028(0x61, dev) < 0) {
513 result = -EINVAL;
514 goto out_free;
515 }
516 break;
447 case EM2883_BOARD_KWORLD_HYBRID_330U: 517 case EM2883_BOARD_KWORLD_HYBRID_330U:
448 case EM2882_BOARD_EVGA_INDTUBE: 518 case EM2882_BOARD_EVGA_INDTUBE:
449 dvb->frontend = dvb_attach(s5h1409_attach, 519 dvb->frontend = dvb_attach(s5h1409_attach,
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 2c86fcf089f5..27e33a287dfc 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -483,7 +483,7 @@ static char *i2c_devs[128] = {
483 [0xa0 >> 1] = "eeprom", 483 [0xa0 >> 1] = "eeprom",
484 [0xb0 >> 1] = "tda9874", 484 [0xb0 >> 1] = "tda9874",
485 [0xb8 >> 1] = "tvp5150a", 485 [0xb8 >> 1] = "tvp5150a",
486 [0xba >> 1] = "tvp5150a", 486 [0xba >> 1] = "webcam sensor or tvp5150a",
487 [0xc0 >> 1] = "tuner (analog)", 487 [0xc0 >> 1] = "tuner (analog)",
488 [0xc2 >> 1] = "tuner (analog)", 488 [0xc2 >> 1] = "tuner (analog)",
489 [0xc4 >> 1] = "tuner (analog)", 489 [0xc4 >> 1] = "tuner (analog)",
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 8fe1beecfffa..ff37b4c15f44 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -90,10 +90,35 @@ MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
90/* supported video standards */ 90/* supported video standards */
91static struct em28xx_fmt format[] = { 91static struct em28xx_fmt format[] = {
92 { 92 {
93 .name = "16bpp YUY2, 4:2:2, packed", 93 .name = "16 bpp YUY2, 4:2:2, packed",
94 .fourcc = V4L2_PIX_FMT_YUYV, 94 .fourcc = V4L2_PIX_FMT_YUYV,
95 .depth = 16, 95 .depth = 16,
96 .reg = EM28XX_OUTFMT_YUV422_Y0UY1V, 96 .reg = EM28XX_OUTFMT_YUV422_Y0UY1V,
97 }, {
98 .name = "16 bpp RGB 565, LE",
99 .fourcc = V4L2_PIX_FMT_RGB565,
100 .depth = 16,
101 .reg = EM28XX_OUTFMT_RGB_16_656,
102 }, {
103 .name = "8 bpp Bayer BGBG..GRGR",
104 .fourcc = V4L2_PIX_FMT_SBGGR8,
105 .depth = 8,
106 .reg = EM28XX_OUTFMT_RGB_8_BGBG,
107 }, {
108 .name = "8 bpp Bayer GRGR..BGBG",
109 .fourcc = V4L2_PIX_FMT_SGRBG8,
110 .depth = 8,
111 .reg = EM28XX_OUTFMT_RGB_8_GRGR,
112 }, {
113 .name = "8 bpp Bayer GBGB..RGRG",
114 .fourcc = V4L2_PIX_FMT_SGBRG8,
115 .depth = 8,
116 .reg = EM28XX_OUTFMT_RGB_8_GBGB,
117 }, {
118 .name = "12 bpp YUV411",
119 .fourcc = V4L2_PIX_FMT_YUV411P,
120 .depth = 12,
121 .reg = EM28XX_OUTFMT_YUV411,
97 }, 122 },
98}; 123};
99 124
@@ -632,8 +657,8 @@ static void get_scale(struct em28xx *dev,
632 unsigned int width, unsigned int height, 657 unsigned int width, unsigned int height,
633 unsigned int *hscale, unsigned int *vscale) 658 unsigned int *hscale, unsigned int *vscale)
634{ 659{
635 unsigned int maxw = norm_maxw(dev); 660 unsigned int maxw = norm_maxw(dev);
636 unsigned int maxh = norm_maxh(dev); 661 unsigned int maxh = norm_maxh(dev);
637 662
638 *hscale = (((unsigned long)maxw) << 12) / width - 4096L; 663 *hscale = (((unsigned long)maxw) << 12) / width - 4096L;
639 if (*hscale >= 0x4000) 664 if (*hscale >= 0x4000)
@@ -733,13 +758,34 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
733 return 0; 758 return 0;
734} 759}
735 760
761static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
762 unsigned width, unsigned height)
763{
764 struct em28xx_fmt *fmt;
765
766 fmt = format_by_fourcc(fourcc);
767 if (!fmt)
768 return -EINVAL;
769
770 dev->format = fmt;
771 dev->width = width;
772 dev->height = height;
773
774 /* set new image size */
775 get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
776
777 em28xx_set_alternate(dev);
778 em28xx_resolution_set(dev);
779
780 return 0;
781}
782
736static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 783static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
737 struct v4l2_format *f) 784 struct v4l2_format *f)
738{ 785{
739 struct em28xx_fh *fh = priv; 786 struct em28xx_fh *fh = priv;
740 struct em28xx *dev = fh->dev; 787 struct em28xx *dev = fh->dev;
741 int rc; 788 int rc;
742 struct em28xx_fmt *fmt;
743 789
744 rc = check_dev(dev); 790 rc = check_dev(dev);
745 if (rc < 0) 791 if (rc < 0)
@@ -749,12 +795,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
749 795
750 vidioc_try_fmt_vid_cap(file, priv, f); 796 vidioc_try_fmt_vid_cap(file, priv, f);
751 797
752 fmt = format_by_fourcc(f->fmt.pix.pixelformat);
753 if (!fmt) {
754 rc = -EINVAL;
755 goto out;
756 }
757
758 if (videobuf_queue_is_busy(&fh->vb_vidq)) { 798 if (videobuf_queue_is_busy(&fh->vb_vidq)) {
759 em28xx_errdev("%s queue busy\n", __func__); 799 em28xx_errdev("%s queue busy\n", __func__);
760 rc = -EBUSY; 800 rc = -EBUSY;
@@ -767,16 +807,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
767 goto out; 807 goto out;
768 } 808 }
769 809
770 /* set new image size */ 810 rc = em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
771 dev->width = f->fmt.pix.width; 811 f->fmt.pix.width, f->fmt.pix.height);
772 dev->height = f->fmt.pix.height;
773 dev->format = fmt;
774 get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
775
776 em28xx_set_alternate(dev);
777 em28xx_resolution_set(dev);
778
779 rc = 0;
780 812
781out: 813out:
782 mutex_unlock(&dev->lock); 814 mutex_unlock(&dev->lock);
@@ -1616,11 +1648,6 @@ static int em28xx_v4l2_open(struct file *filp)
1616 filp->private_data = fh; 1648 filp->private_data = fh;
1617 1649
1618 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) { 1650 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
1619 dev->width = norm_maxw(dev);
1620 dev->height = norm_maxh(dev);
1621 dev->hscale = 0;
1622 dev->vscale = 0;
1623
1624 em28xx_set_mode(dev, EM28XX_ANALOG_MODE); 1651 em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
1625 em28xx_set_alternate(dev); 1652 em28xx_set_alternate(dev);
1626 em28xx_resolution_set(dev); 1653 em28xx_resolution_set(dev);
@@ -1962,15 +1989,14 @@ int em28xx_register_analog_devices(struct em28xx *dev)
1962 1989
1963 /* set default norm */ 1990 /* set default norm */
1964 dev->norm = em28xx_video_template.current_norm; 1991 dev->norm = em28xx_video_template.current_norm;
1965 dev->width = norm_maxw(dev);
1966 dev->height = norm_maxh(dev);
1967 dev->interlaced = EM28XX_INTERLACED_DEFAULT; 1992 dev->interlaced = EM28XX_INTERLACED_DEFAULT;
1968 dev->hscale = 0;
1969 dev->vscale = 0;
1970 dev->ctl_input = 0; 1993 dev->ctl_input = 0;
1971 1994
1972 /* Analog specific initialization */ 1995 /* Analog specific initialization */
1973 dev->format = &format[0]; 1996 dev->format = &format[0];
1997 em28xx_set_video_format(dev, format[0].fourcc,
1998 norm_maxw(dev), norm_maxh(dev));
1999
1974 video_mux(dev, dev->ctl_input); 2000 video_mux(dev, dev->ctl_input);
1975 2001
1976 /* Audio defaults */ 2002 /* Audio defaults */
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 813ce45c2f99..45bd513f62dc 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -107,6 +107,7 @@
107#define EM2860_BOARD_TERRATEC_AV350 68 107#define EM2860_BOARD_TERRATEC_AV350 68
108#define EM2882_BOARD_KWORLD_ATSC_315U 69 108#define EM2882_BOARD_KWORLD_ATSC_315U 69
109#define EM2882_BOARD_EVGA_INDTUBE 70 109#define EM2882_BOARD_EVGA_INDTUBE 70
110#define EM2820_BOARD_SILVERCREST_WEBCAM 71
110 111
111/* Limits minimum and default number of buffers */ 112/* Limits minimum and default number of buffers */
112#define EM28XX_MIN_BUF 4 113#define EM28XX_MIN_BUF 4
@@ -357,11 +358,17 @@ struct em28xx_input {
357#define INPUT(nr) (&em28xx_boards[dev->model].input[nr]) 358#define INPUT(nr) (&em28xx_boards[dev->model].input[nr])
358 359
359enum em28xx_decoder { 360enum em28xx_decoder {
360 EM28XX_NODECODER, 361 EM28XX_NODECODER = 0,
361 EM28XX_TVP5150, 362 EM28XX_TVP5150,
362 EM28XX_SAA711X, 363 EM28XX_SAA711X,
363}; 364};
364 365
366enum em28xx_sensor {
367 EM28XX_NOSENSOR = 0,
368 EM28XX_MT9V011,
369 EM28XX_MT9M001,
370};
371
365enum em28xx_adecoder { 372enum em28xx_adecoder {
366 EM28XX_NOADECODER = 0, 373 EM28XX_NOADECODER = 0,
367 EM28XX_TVAUDIO, 374 EM28XX_TVAUDIO,
@@ -388,6 +395,7 @@ struct em28xx_board {
388 unsigned int max_range_640_480:1; 395 unsigned int max_range_640_480:1;
389 unsigned int has_dvb:1; 396 unsigned int has_dvb:1;
390 unsigned int has_snapshot_button:1; 397 unsigned int has_snapshot_button:1;
398 unsigned int is_webcam:1;
391 unsigned int valid:1; 399 unsigned int valid:1;
392 400
393 unsigned char xclk, i2c_speed; 401 unsigned char xclk, i2c_speed;
@@ -471,6 +479,14 @@ struct em28xx {
471 struct v4l2_device v4l2_dev; 479 struct v4l2_device v4l2_dev;
472 struct em28xx_board board; 480 struct em28xx_board board;
473 481
482 /* Webcam specific fields */
483 enum em28xx_sensor em28xx_sensor;
484 int sensor_xres, sensor_yres;
485 int sensor_xtal;
486
487 /* Vinmode/Vinctl used at the driver */
488 int vinmode, vinctl;
489
474 unsigned int stream_on:1; /* Locks streams */ 490 unsigned int stream_on:1; /* Locks streams */
475 unsigned int has_audio_class:1; 491 unsigned int has_audio_class:1;
476 unsigned int has_alsa_audio:1; 492 unsigned int has_alsa_audio:1;
@@ -751,17 +767,23 @@ static inline int em28xx_gamma_set(struct em28xx *dev, s32 val)
751/*FIXME: maxw should be dependent of alt mode */ 767/*FIXME: maxw should be dependent of alt mode */
752static inline unsigned int norm_maxw(struct em28xx *dev) 768static inline unsigned int norm_maxw(struct em28xx *dev)
753{ 769{
770 if (dev->board.is_webcam)
771 return dev->sensor_xres;
772
754 if (dev->board.max_range_640_480) 773 if (dev->board.max_range_640_480)
755 return 640; 774 return 640;
756 else 775
757 return 720; 776 return 720;
758} 777}
759 778
760static inline unsigned int norm_maxh(struct em28xx *dev) 779static inline unsigned int norm_maxh(struct em28xx *dev)
761{ 780{
781 if (dev->board.is_webcam)
782 return dev->sensor_yres;
783
762 if (dev->board.max_range_640_480) 784 if (dev->board.max_range_640_480)
763 return 480; 785 return 480;
764 else 786
765 return (dev->norm & V4L2_STD_625_50) ? 576 : 480; 787 return (dev->norm & V4L2_STD_625_50) ? 576 : 480;
766} 788}
767#endif 789#endif
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 578dc4ffc965..34f46f2bc040 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -102,6 +102,22 @@ config USB_GSPCA_PAC7311
102 To compile this driver as a module, choose M here: the 102 To compile this driver as a module, choose M here: the
103 module will be called gspca_pac7311. 103 module will be called gspca_pac7311.
104 104
105config USB_GSPCA_SN9C20X
106 tristate "SN9C20X USB Camera Driver"
107 depends on VIDEO_V4L2 && USB_GSPCA
108 help
109 Say Y here if you want support for cameras based on the
110 sn9c20x chips (SN9C201 and SN9C202).
111
112 To compile this driver as a module, choose M here: the
113 module will be called gspca_sn9c20x.
114
115config USB_GSPCA_SN9C20X_EVDEV
116 bool "Enable evdev support"
117 depends on USB_GSPCA_SN9C20X
118 ---help---
 119	  Say Y here to enable evdev support for the sn9c20x webcam button.
120
105config USB_GSPCA_SONIXB 121config USB_GSPCA_SONIXB
106 tristate "SONIX Bayer USB Camera Driver" 122 tristate "SONIX Bayer USB Camera Driver"
107 depends on VIDEO_V4L2 && USB_GSPCA 123 depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index 8a6643e8eb96..f6d3b86e9ad5 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o
8obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o 8obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o
9obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o 9obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o
10obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o 10obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o
11obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o
11obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o 12obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o
12obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o 13obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o
13obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o 14obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o
@@ -35,6 +36,7 @@ gspca_ov519-objs := ov519.o
35gspca_ov534-objs := ov534.o 36gspca_ov534-objs := ov534.o
36gspca_pac207-objs := pac207.o 37gspca_pac207-objs := pac207.o
37gspca_pac7311-objs := pac7311.o 38gspca_pac7311-objs := pac7311.o
39gspca_sn9c20x-objs := sn9c20x.o
38gspca_sonixb-objs := sonixb.o 40gspca_sonixb-objs := sonixb.o
39gspca_sonixj-objs := sonixj.o 41gspca_sonixj-objs := sonixj.o
40gspca_spca500-objs := spca500.o 42gspca_spca500-objs := spca500.o
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 219cfa6fb877..8d48ea1742c2 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -846,6 +846,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
846 846
847 /* create the JPEG header */ 847 /* create the JPEG header */
848 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); 848 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
849 if (!sd->jpeg_hdr)
850 return -ENOMEM;
849 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 851 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
850 0x22); /* JPEG 411 */ 852 0x22); /* JPEG 411 */
851 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 853 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 1e89600986c8..b8561dfb6c8c 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -727,6 +727,74 @@ static int gspca_get_mode(struct gspca_dev *gspca_dev,
727 return -EINVAL; 727 return -EINVAL;
728} 728}
729 729
730#ifdef CONFIG_VIDEO_ADV_DEBUG
731static int vidioc_g_register(struct file *file, void *priv,
732 struct v4l2_dbg_register *reg)
733{
734 int ret;
735 struct gspca_dev *gspca_dev = priv;
736
737 if (!gspca_dev->sd_desc->get_chip_ident)
738 return -EINVAL;
739
740 if (!gspca_dev->sd_desc->get_register)
741 return -EINVAL;
742
743 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
744 return -ERESTARTSYS;
745 if (gspca_dev->present)
746 ret = gspca_dev->sd_desc->get_register(gspca_dev, reg);
747 else
748 ret = -ENODEV;
749 mutex_unlock(&gspca_dev->usb_lock);
750
751 return ret;
752}
753
754static int vidioc_s_register(struct file *file, void *priv,
755 struct v4l2_dbg_register *reg)
756{
757 int ret;
758 struct gspca_dev *gspca_dev = priv;
759
760 if (!gspca_dev->sd_desc->get_chip_ident)
761 return -EINVAL;
762
763 if (!gspca_dev->sd_desc->set_register)
764 return -EINVAL;
765
766 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
767 return -ERESTARTSYS;
768 if (gspca_dev->present)
769 ret = gspca_dev->sd_desc->set_register(gspca_dev, reg);
770 else
771 ret = -ENODEV;
772 mutex_unlock(&gspca_dev->usb_lock);
773
774 return ret;
775}
776#endif
777
778static int vidioc_g_chip_ident(struct file *file, void *priv,
779 struct v4l2_dbg_chip_ident *chip)
780{
781 int ret;
782 struct gspca_dev *gspca_dev = priv;
783
784 if (!gspca_dev->sd_desc->get_chip_ident)
785 return -EINVAL;
786
787 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
788 return -ERESTARTSYS;
789 if (gspca_dev->present)
790 ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
791 else
792 ret = -ENODEV;
793 mutex_unlock(&gspca_dev->usb_lock);
794
795 return ret;
796}
797
730static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, 798static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
731 struct v4l2_fmtdesc *fmtdesc) 799 struct v4l2_fmtdesc *fmtdesc)
732{ 800{
@@ -1883,6 +1951,11 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
1883 .vidioc_s_parm = vidioc_s_parm, 1951 .vidioc_s_parm = vidioc_s_parm,
1884 .vidioc_s_std = vidioc_s_std, 1952 .vidioc_s_std = vidioc_s_std,
1885 .vidioc_enum_framesizes = vidioc_enum_framesizes, 1953 .vidioc_enum_framesizes = vidioc_enum_framesizes,
1954#ifdef CONFIG_VIDEO_ADV_DEBUG
1955 .vidioc_g_register = vidioc_g_register,
1956 .vidioc_s_register = vidioc_s_register,
1957#endif
1958 .vidioc_g_chip_ident = vidioc_g_chip_ident,
1886#ifdef CONFIG_VIDEO_V4L1_COMPAT 1959#ifdef CONFIG_VIDEO_V4L1_COMPAT
1887 .vidiocgmbuf = vidiocgmbuf, 1960 .vidiocgmbuf = vidiocgmbuf,
1888#endif 1961#endif
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index bd1faff88644..46c4effdfcd5 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -69,6 +69,10 @@ typedef void (*cam_v_op) (struct gspca_dev *);
69typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *); 69typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *);
70typedef int (*cam_jpg_op) (struct gspca_dev *, 70typedef int (*cam_jpg_op) (struct gspca_dev *,
71 struct v4l2_jpegcompression *); 71 struct v4l2_jpegcompression *);
72typedef int (*cam_reg_op) (struct gspca_dev *,
73 struct v4l2_dbg_register *);
74typedef int (*cam_ident_op) (struct gspca_dev *,
75 struct v4l2_dbg_chip_ident *);
72typedef int (*cam_streamparm_op) (struct gspca_dev *, 76typedef int (*cam_streamparm_op) (struct gspca_dev *,
73 struct v4l2_streamparm *); 77 struct v4l2_streamparm *);
74typedef int (*cam_qmnu_op) (struct gspca_dev *, 78typedef int (*cam_qmnu_op) (struct gspca_dev *,
@@ -105,6 +109,11 @@ struct sd_desc {
105 cam_qmnu_op querymenu; 109 cam_qmnu_op querymenu;
106 cam_streamparm_op get_streamparm; 110 cam_streamparm_op get_streamparm;
107 cam_streamparm_op set_streamparm; 111 cam_streamparm_op set_streamparm;
112#ifdef CONFIG_VIDEO_ADV_DEBUG
113 cam_reg_op set_register;
114 cam_reg_op get_register;
115#endif
116 cam_ident_op get_chip_ident;
108}; 117};
109 118
110/* packet types when moving from iso buf to frame buf */ 119/* packet types when moving from iso buf to frame buf */
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index 191bcd718979..0163903d1c0f 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -476,9 +476,6 @@ static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
476 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 476 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
477 if (err < 0) 477 if (err < 0)
478 return err; 478 return err;
479 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
480 if (err < 0)
481 return err;
482 479
483 err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); 480 err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
484 if (err < 0) 481 if (err < 0)
@@ -524,9 +521,6 @@ static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
524 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 521 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
525 if (err < 0) 522 if (err < 0)
526 return err; 523 return err;
527 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
528 if (err < 0)
529 return err;
530 524
531 err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); 525 err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
532 if (err < 0) 526 if (err < 0)
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index 75e8d14e4ac7..de769caf013d 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -201,6 +201,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
201 201
202 /* create the JPEG header */ 202 /* create the JPEG header */
203 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); 203 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
204 if (!sd->jpeg_hdr)
205 return -ENOMEM;
204 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 206 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
205 0x21); /* JPEG 422 */ 207 0x21); /* JPEG 422 */
206 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 208 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
new file mode 100644
index 000000000000..fcfbbd329b4c
--- /dev/null
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -0,0 +1,2434 @@
1/*
2 * Sonix sn9c201 sn9c202 library
3 * Copyright (C) 2008-2009 microdia project <microdia@googlegroups.com>
4 * Copyright (C) 2009 Brian Johnson <brijohn@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
22#include <linux/kthread.h>
23#include <linux/freezer.h>
24#include <linux/usb/input.h>
25#include <linux/input.h>
26#endif
27
28#include "gspca.h"
29#include "jpeg.h"
30
31#include <media/v4l2-chip-ident.h>
32
33MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, "
34 "microdia project <microdia@googlegroups.com>");
35MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver");
36MODULE_LICENSE("GPL");
37
38#define MODULE_NAME "sn9c20x"
39
40#define MODE_RAW 0x10
41#define MODE_JPEG 0x20
42#define MODE_SXGA 0x80
43
44#define SENSOR_OV9650 0
45#define SENSOR_OV9655 1
46#define SENSOR_SOI968 2
47#define SENSOR_OV7660 3
48#define SENSOR_OV7670 4
49#define SENSOR_MT9V011 5
50#define SENSOR_MT9V111 6
51#define SENSOR_MT9V112 7
52#define SENSOR_MT9M001 8
53#define SENSOR_MT9M111 9
54#define SENSOR_HV7131R 10
55#define SENSOR_MT9VPRB 20
56
57/* specific webcam descriptor */
58struct sd {
59 struct gspca_dev gspca_dev;
60
61#define MIN_AVG_LUM 80
62#define MAX_AVG_LUM 130
63 atomic_t avg_lum;
64 u8 old_step;
65 u8 older_step;
66 u8 exposure_step;
67
68 u8 brightness;
69 u8 contrast;
70 u8 saturation;
71 s16 hue;
72 u8 gamma;
73 u8 red;
74 u8 blue;
75
76 u8 hflip;
77 u8 vflip;
78 u8 gain;
79 u16 exposure;
80 u8 auto_exposure;
81
82 u8 i2c_addr;
83 u8 sensor;
84 u8 hstart;
85 u8 vstart;
86
87 u8 *jpeg_hdr;
88 u8 quality;
89
90#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
91 struct input_dev *input_dev;
92 u8 input_gpio;
93 struct task_struct *input_task;
94#endif
95};
96
97static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val);
98static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val);
99static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val);
100static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val);
101static int sd_setsaturation(struct gspca_dev *gspca_dev, s32 val);
102static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val);
103static int sd_sethue(struct gspca_dev *gspca_dev, s32 val);
104static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val);
105static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val);
106static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val);
107static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val);
108static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val);
109static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val);
110static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val);
111static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val);
112static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val);
113static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val);
114static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val);
115static int sd_setgain(struct gspca_dev *gspca_dev, s32 val);
116static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val);
117static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val);
118static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val);
119static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val);
120static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val);
121
122static struct ctrl sd_ctrls[] = {
123 {
124#define BRIGHTNESS_IDX 0
125 {
126 .id = V4L2_CID_BRIGHTNESS,
127 .type = V4L2_CTRL_TYPE_INTEGER,
128 .name = "Brightness",
129 .minimum = 0,
130 .maximum = 0xff,
131 .step = 1,
132#define BRIGHTNESS_DEFAULT 0x7f
133 .default_value = BRIGHTNESS_DEFAULT,
134 },
135 .set = sd_setbrightness,
136 .get = sd_getbrightness,
137 },
138 {
139#define CONTRAST_IDX 1
140 {
141 .id = V4L2_CID_CONTRAST,
142 .type = V4L2_CTRL_TYPE_INTEGER,
143 .name = "Contrast",
144 .minimum = 0,
145 .maximum = 0xff,
146 .step = 1,
147#define CONTRAST_DEFAULT 0x7f
148 .default_value = CONTRAST_DEFAULT,
149 },
150 .set = sd_setcontrast,
151 .get = sd_getcontrast,
152 },
153 {
154#define SATURATION_IDX 2
155 {
156 .id = V4L2_CID_SATURATION,
157 .type = V4L2_CTRL_TYPE_INTEGER,
158 .name = "Saturation",
159 .minimum = 0,
160 .maximum = 0xff,
161 .step = 1,
162#define SATURATION_DEFAULT 0x7f
163 .default_value = SATURATION_DEFAULT,
164 },
165 .set = sd_setsaturation,
166 .get = sd_getsaturation,
167 },
168 {
169#define HUE_IDX 3
170 {
171 .id = V4L2_CID_HUE,
172 .type = V4L2_CTRL_TYPE_INTEGER,
173 .name = "Hue",
174 .minimum = -180,
175 .maximum = 180,
176 .step = 1,
177#define HUE_DEFAULT 0
178 .default_value = HUE_DEFAULT,
179 },
180 .set = sd_sethue,
181 .get = sd_gethue,
182 },
183 {
184#define GAMMA_IDX 4
185 {
186 .id = V4L2_CID_GAMMA,
187 .type = V4L2_CTRL_TYPE_INTEGER,
188 .name = "Gamma",
189 .minimum = 0,
190 .maximum = 0xff,
191 .step = 1,
192#define GAMMA_DEFAULT 0x10
193 .default_value = GAMMA_DEFAULT,
194 },
195 .set = sd_setgamma,
196 .get = sd_getgamma,
197 },
198 {
199#define BLUE_IDX 5
200 {
201 .id = V4L2_CID_BLUE_BALANCE,
202 .type = V4L2_CTRL_TYPE_INTEGER,
203 .name = "Blue Balance",
204 .minimum = 0,
205 .maximum = 0x7f,
206 .step = 1,
207#define BLUE_DEFAULT 0x28
208 .default_value = BLUE_DEFAULT,
209 },
210 .set = sd_setbluebalance,
211 .get = sd_getbluebalance,
212 },
213 {
214#define RED_IDX 6
215 {
216 .id = V4L2_CID_RED_BALANCE,
217 .type = V4L2_CTRL_TYPE_INTEGER,
218 .name = "Red Balance",
219 .minimum = 0,
220 .maximum = 0x7f,
221 .step = 1,
222#define RED_DEFAULT 0x28
223 .default_value = RED_DEFAULT,
224 },
225 .set = sd_setredbalance,
226 .get = sd_getredbalance,
227 },
228 {
229#define HFLIP_IDX 7
230 {
231 .id = V4L2_CID_HFLIP,
232 .type = V4L2_CTRL_TYPE_BOOLEAN,
233 .name = "Horizontal Flip",
234 .minimum = 0,
235 .maximum = 1,
236 .step = 1,
237#define HFLIP_DEFAULT 0
238 .default_value = HFLIP_DEFAULT,
239 },
240 .set = sd_sethflip,
241 .get = sd_gethflip,
242 },
243 {
244#define VFLIP_IDX 8
245 {
246 .id = V4L2_CID_VFLIP,
247 .type = V4L2_CTRL_TYPE_BOOLEAN,
248 .name = "Vertical Flip",
249 .minimum = 0,
250 .maximum = 1,
251 .step = 1,
252#define VFLIP_DEFAULT 0
253 .default_value = VFLIP_DEFAULT,
254 },
255 .set = sd_setvflip,
256 .get = sd_getvflip,
257 },
258 {
259#define EXPOSURE_IDX 9
260 {
261 .id = V4L2_CID_EXPOSURE,
262 .type = V4L2_CTRL_TYPE_INTEGER,
263 .name = "Exposure",
264 .minimum = 0,
265 .maximum = 0x1780,
266 .step = 1,
267#define EXPOSURE_DEFAULT 0x33
268 .default_value = EXPOSURE_DEFAULT,
269 },
270 .set = sd_setexposure,
271 .get = sd_getexposure,
272 },
273 {
274#define GAIN_IDX 10
275 {
276 .id = V4L2_CID_GAIN,
277 .type = V4L2_CTRL_TYPE_INTEGER,
278 .name = "Gain",
279 .minimum = 0,
280 .maximum = 28,
281 .step = 1,
282#define GAIN_DEFAULT 0x00
283 .default_value = GAIN_DEFAULT,
284 },
285 .set = sd_setgain,
286 .get = sd_getgain,
287 },
288 {
289#define AUTOGAIN_IDX 11
290 {
291 .id = V4L2_CID_AUTOGAIN,
292 .type = V4L2_CTRL_TYPE_BOOLEAN,
293 .name = "Auto Exposure",
294 .minimum = 0,
295 .maximum = 1,
296 .step = 1,
297#define AUTO_EXPOSURE_DEFAULT 1
298 .default_value = AUTO_EXPOSURE_DEFAULT,
299 },
300 .set = sd_setautoexposure,
301 .get = sd_getautoexposure,
302 },
303};
304
305static const struct v4l2_pix_format vga_mode[] = {
306 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
307 .bytesperline = 240,
308 .sizeimage = 240 * 120,
309 .colorspace = V4L2_COLORSPACE_JPEG,
310 .priv = 0 | MODE_JPEG},
311 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
312 .bytesperline = 160,
313 .sizeimage = 160 * 120,
314 .colorspace = V4L2_COLORSPACE_SRGB,
315 .priv = 0 | MODE_RAW},
316 {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
317 .bytesperline = 240,
318 .sizeimage = 240 * 120,
319 .colorspace = V4L2_COLORSPACE_SRGB,
320 .priv = 0},
321 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
322 .bytesperline = 480,
323 .sizeimage = 480 * 240 ,
324 .colorspace = V4L2_COLORSPACE_JPEG,
325 .priv = 1 | MODE_JPEG},
326 {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
327 .bytesperline = 320,
328 .sizeimage = 320 * 240 ,
329 .colorspace = V4L2_COLORSPACE_SRGB,
330 .priv = 1 | MODE_RAW},
331 {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
332 .bytesperline = 480,
333 .sizeimage = 480 * 240 ,
334 .colorspace = V4L2_COLORSPACE_SRGB,
335 .priv = 1},
336 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
337 .bytesperline = 960,
338 .sizeimage = 960 * 480,
339 .colorspace = V4L2_COLORSPACE_JPEG,
340 .priv = 2 | MODE_JPEG},
341 {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
342 .bytesperline = 640,
343 .sizeimage = 640 * 480,
344 .colorspace = V4L2_COLORSPACE_SRGB,
345 .priv = 2 | MODE_RAW},
346 {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
347 .bytesperline = 960,
348 .sizeimage = 960 * 480,
349 .colorspace = V4L2_COLORSPACE_SRGB,
350 .priv = 2},
351};
352
353static const struct v4l2_pix_format sxga_mode[] = {
354 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
355 .bytesperline = 240,
356 .sizeimage = 240 * 120,
357 .colorspace = V4L2_COLORSPACE_JPEG,
358 .priv = 0 | MODE_JPEG},
359 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
360 .bytesperline = 160,
361 .sizeimage = 160 * 120,
362 .colorspace = V4L2_COLORSPACE_SRGB,
363 .priv = 0 | MODE_RAW},
364 {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
365 .bytesperline = 240,
366 .sizeimage = 240 * 120,
367 .colorspace = V4L2_COLORSPACE_SRGB,
368 .priv = 0},
369 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
370 .bytesperline = 480,
371 .sizeimage = 480 * 240 ,
372 .colorspace = V4L2_COLORSPACE_JPEG,
373 .priv = 1 | MODE_JPEG},
374 {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
375 .bytesperline = 320,
376 .sizeimage = 320 * 240 ,
377 .colorspace = V4L2_COLORSPACE_SRGB,
378 .priv = 1 | MODE_RAW},
379 {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
380 .bytesperline = 480,
381 .sizeimage = 480 * 240 ,
382 .colorspace = V4L2_COLORSPACE_SRGB,
383 .priv = 1},
384 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
385 .bytesperline = 960,
386 .sizeimage = 960 * 480,
387 .colorspace = V4L2_COLORSPACE_JPEG,
388 .priv = 2 | MODE_JPEG},
389 {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
390 .bytesperline = 640,
391 .sizeimage = 640 * 480,
392 .colorspace = V4L2_COLORSPACE_SRGB,
393 .priv = 2 | MODE_RAW},
394 {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
395 .bytesperline = 960,
396 .sizeimage = 960 * 480,
397 .colorspace = V4L2_COLORSPACE_SRGB,
398 .priv = 2},
399 {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
400 .bytesperline = 1280,
401 .sizeimage = (1280 * 1024) + 64,
402 .colorspace = V4L2_COLORSPACE_SRGB,
403 .priv = 3 | MODE_RAW | MODE_SXGA},
404};
405
406static const int hsv_red_x[] = {
407 41, 44, 46, 48, 50, 52, 54, 56,
408 58, 60, 62, 64, 66, 68, 70, 72,
409 74, 76, 78, 80, 81, 83, 85, 87,
410 88, 90, 92, 93, 95, 97, 98, 100,
411 101, 102, 104, 105, 107, 108, 109, 110,
412 112, 113, 114, 115, 116, 117, 118, 119,
413 120, 121, 122, 123, 123, 124, 125, 125,
414 126, 127, 127, 128, 128, 129, 129, 129,
415 130, 130, 130, 130, 131, 131, 131, 131,
416 131, 131, 131, 131, 130, 130, 130, 130,
417 129, 129, 129, 128, 128, 127, 127, 126,
418 125, 125, 124, 123, 122, 122, 121, 120,
419 119, 118, 117, 116, 115, 114, 112, 111,
420 110, 109, 107, 106, 105, 103, 102, 101,
421 99, 98, 96, 94, 93, 91, 90, 88,
422 86, 84, 83, 81, 79, 77, 75, 74,
423 72, 70, 68, 66, 64, 62, 60, 58,
424 56, 54, 52, 49, 47, 45, 43, 41,
425 39, 36, 34, 32, 30, 28, 25, 23,
426 21, 19, 16, 14, 12, 9, 7, 5,
427 3, 0, -1, -3, -6, -8, -10, -12,
428 -15, -17, -19, -22, -24, -26, -28, -30,
429 -33, -35, -37, -39, -41, -44, -46, -48,
430 -50, -52, -54, -56, -58, -60, -62, -64,
431 -66, -68, -70, -72, -74, -76, -78, -80,
432 -81, -83, -85, -87, -88, -90, -92, -93,
433 -95, -97, -98, -100, -101, -102, -104, -105,
434 -107, -108, -109, -110, -112, -113, -114, -115,
435 -116, -117, -118, -119, -120, -121, -122, -123,
436 -123, -124, -125, -125, -126, -127, -127, -128,
437 -128, -128, -128, -128, -128, -128, -128, -128,
438 -128, -128, -128, -128, -128, -128, -128, -128,
439 -128, -128, -128, -128, -128, -128, -128, -128,
440 -128, -127, -127, -126, -125, -125, -124, -123,
441 -122, -122, -121, -120, -119, -118, -117, -116,
442 -115, -114, -112, -111, -110, -109, -107, -106,
443 -105, -103, -102, -101, -99, -98, -96, -94,
444 -93, -91, -90, -88, -86, -84, -83, -81,
445 -79, -77, -75, -74, -72, -70, -68, -66,
446 -64, -62, -60, -58, -56, -54, -52, -49,
447 -47, -45, -43, -41, -39, -36, -34, -32,
448 -30, -28, -25, -23, -21, -19, -16, -14,
449 -12, -9, -7, -5, -3, 0, 1, 3,
450 6, 8, 10, 12, 15, 17, 19, 22,
451 24, 26, 28, 30, 33, 35, 37, 39, 41
452};
453
454static const int hsv_red_y[] = {
455 82, 80, 78, 76, 74, 73, 71, 69,
456 67, 65, 63, 61, 58, 56, 54, 52,
457 50, 48, 46, 44, 41, 39, 37, 35,
458 32, 30, 28, 26, 23, 21, 19, 16,
459 14, 12, 10, 7, 5, 3, 0, -1,
460 -3, -6, -8, -10, -13, -15, -17, -19,
461 -22, -24, -26, -29, -31, -33, -35, -38,
462 -40, -42, -44, -46, -48, -51, -53, -55,
463 -57, -59, -61, -63, -65, -67, -69, -71,
464 -73, -75, -77, -79, -81, -82, -84, -86,
465 -88, -89, -91, -93, -94, -96, -98, -99,
466 -101, -102, -104, -105, -106, -108, -109, -110,
467 -112, -113, -114, -115, -116, -117, -119, -120,
468 -120, -121, -122, -123, -124, -125, -126, -126,
469 -127, -128, -128, -128, -128, -128, -128, -128,
470 -128, -128, -128, -128, -128, -128, -128, -128,
471 -128, -128, -128, -128, -128, -128, -128, -128,
472 -128, -128, -128, -128, -128, -128, -128, -128,
473 -127, -127, -126, -125, -125, -124, -123, -122,
474 -121, -120, -119, -118, -117, -116, -115, -114,
475 -113, -111, -110, -109, -107, -106, -105, -103,
476 -102, -100, -99, -97, -96, -94, -92, -91,
477 -89, -87, -85, -84, -82, -80, -78, -76,
478 -74, -73, -71, -69, -67, -65, -63, -61,
479 -58, -56, -54, -52, -50, -48, -46, -44,
480 -41, -39, -37, -35, -32, -30, -28, -26,
481 -23, -21, -19, -16, -14, -12, -10, -7,
482 -5, -3, 0, 1, 3, 6, 8, 10,
483 13, 15, 17, 19, 22, 24, 26, 29,
484 31, 33, 35, 38, 40, 42, 44, 46,
485 48, 51, 53, 55, 57, 59, 61, 63,
486 65, 67, 69, 71, 73, 75, 77, 79,
487 81, 82, 84, 86, 88, 89, 91, 93,
488 94, 96, 98, 99, 101, 102, 104, 105,
489 106, 108, 109, 110, 112, 113, 114, 115,
490 116, 117, 119, 120, 120, 121, 122, 123,
491 124, 125, 126, 126, 127, 128, 128, 129,
492 129, 130, 130, 131, 131, 131, 131, 132,
493 132, 132, 132, 132, 132, 132, 132, 132,
494 132, 132, 132, 131, 131, 131, 130, 130,
495 130, 129, 129, 128, 127, 127, 126, 125,
496 125, 124, 123, 122, 121, 120, 119, 118,
497 117, 116, 115, 114, 113, 111, 110, 109,
498 107, 106, 105, 103, 102, 100, 99, 97,
499 96, 94, 92, 91, 89, 87, 85, 84, 82
500};
501
502static const int hsv_green_x[] = {
503 -124, -124, -125, -125, -125, -125, -125, -125,
504 -125, -126, -126, -125, -125, -125, -125, -125,
505 -125, -124, -124, -124, -123, -123, -122, -122,
506 -121, -121, -120, -120, -119, -118, -117, -117,
507 -116, -115, -114, -113, -112, -111, -110, -109,
508 -108, -107, -105, -104, -103, -102, -100, -99,
509 -98, -96, -95, -93, -92, -91, -89, -87,
510 -86, -84, -83, -81, -79, -77, -76, -74,
511 -72, -70, -69, -67, -65, -63, -61, -59,
512 -57, -55, -53, -51, -49, -47, -45, -43,
513 -41, -39, -37, -35, -33, -30, -28, -26,
514 -24, -22, -20, -18, -15, -13, -11, -9,
515 -7, -4, -2, 0, 1, 3, 6, 8,
516 10, 12, 14, 17, 19, 21, 23, 25,
517 27, 29, 32, 34, 36, 38, 40, 42,
518 44, 46, 48, 50, 52, 54, 56, 58,
519 60, 62, 64, 66, 68, 70, 71, 73,
520 75, 77, 78, 80, 82, 83, 85, 87,
521 88, 90, 91, 93, 94, 96, 97, 98,
522 100, 101, 102, 104, 105, 106, 107, 108,
523 109, 111, 112, 113, 113, 114, 115, 116,
524 117, 118, 118, 119, 120, 120, 121, 122,
525 122, 123, 123, 124, 124, 124, 125, 125,
526 125, 125, 125, 125, 125, 126, 126, 125,
527 125, 125, 125, 125, 125, 124, 124, 124,
528 123, 123, 122, 122, 121, 121, 120, 120,
529 119, 118, 117, 117, 116, 115, 114, 113,
530 112, 111, 110, 109, 108, 107, 105, 104,
531 103, 102, 100, 99, 98, 96, 95, 93,
532 92, 91, 89, 87, 86, 84, 83, 81,
533 79, 77, 76, 74, 72, 70, 69, 67,
534 65, 63, 61, 59, 57, 55, 53, 51,
535 49, 47, 45, 43, 41, 39, 37, 35,
536 33, 30, 28, 26, 24, 22, 20, 18,
537 15, 13, 11, 9, 7, 4, 2, 0,
538 -1, -3, -6, -8, -10, -12, -14, -17,
539 -19, -21, -23, -25, -27, -29, -32, -34,
540 -36, -38, -40, -42, -44, -46, -48, -50,
541 -52, -54, -56, -58, -60, -62, -64, -66,
542 -68, -70, -71, -73, -75, -77, -78, -80,
543 -82, -83, -85, -87, -88, -90, -91, -93,
544 -94, -96, -97, -98, -100, -101, -102, -104,
545 -105, -106, -107, -108, -109, -111, -112, -113,
546 -113, -114, -115, -116, -117, -118, -118, -119,
547 -120, -120, -121, -122, -122, -123, -123, -124, -124
548};
549
550static const int hsv_green_y[] = {
551 -100, -99, -98, -97, -95, -94, -93, -91,
552 -90, -89, -87, -86, -84, -83, -81, -80,
553 -78, -76, -75, -73, -71, -70, -68, -66,
554 -64, -63, -61, -59, -57, -55, -53, -51,
555 -49, -48, -46, -44, -42, -40, -38, -36,
556 -34, -32, -30, -27, -25, -23, -21, -19,
557 -17, -15, -13, -11, -9, -7, -4, -2,
558 0, 1, 3, 5, 7, 9, 11, 14,
559 16, 18, 20, 22, 24, 26, 28, 30,
560 32, 34, 36, 38, 40, 42, 44, 46,
561 48, 50, 52, 54, 56, 58, 59, 61,
562 63, 65, 67, 68, 70, 72, 74, 75,
563 77, 78, 80, 82, 83, 85, 86, 88,
564 89, 90, 92, 93, 95, 96, 97, 98,
565 100, 101, 102, 103, 104, 105, 106, 107,
566 108, 109, 110, 111, 112, 112, 113, 114,
567 115, 115, 116, 116, 117, 117, 118, 118,
568 119, 119, 119, 120, 120, 120, 120, 120,
569 121, 121, 121, 121, 121, 121, 120, 120,
570 120, 120, 120, 119, 119, 119, 118, 118,
571 117, 117, 116, 116, 115, 114, 114, 113,
572 112, 111, 111, 110, 109, 108, 107, 106,
573 105, 104, 103, 102, 100, 99, 98, 97,
574 95, 94, 93, 91, 90, 89, 87, 86,
575 84, 83, 81, 80, 78, 76, 75, 73,
576 71, 70, 68, 66, 64, 63, 61, 59,
577 57, 55, 53, 51, 49, 48, 46, 44,
578 42, 40, 38, 36, 34, 32, 30, 27,
579 25, 23, 21, 19, 17, 15, 13, 11,
580 9, 7, 4, 2, 0, -1, -3, -5,
581 -7, -9, -11, -14, -16, -18, -20, -22,
582 -24, -26, -28, -30, -32, -34, -36, -38,
583 -40, -42, -44, -46, -48, -50, -52, -54,
584 -56, -58, -59, -61, -63, -65, -67, -68,
585 -70, -72, -74, -75, -77, -78, -80, -82,
586 -83, -85, -86, -88, -89, -90, -92, -93,
587 -95, -96, -97, -98, -100, -101, -102, -103,
588 -104, -105, -106, -107, -108, -109, -110, -111,
589 -112, -112, -113, -114, -115, -115, -116, -116,
590 -117, -117, -118, -118, -119, -119, -119, -120,
591 -120, -120, -120, -120, -121, -121, -121, -121,
592 -121, -121, -120, -120, -120, -120, -120, -119,
593 -119, -119, -118, -118, -117, -117, -116, -116,
594 -115, -114, -114, -113, -112, -111, -111, -110,
595 -109, -108, -107, -106, -105, -104, -103, -102, -100
596};
597
598static const int hsv_blue_x[] = {
599 112, 113, 114, 114, 115, 116, 117, 117,
600 118, 118, 119, 119, 120, 120, 120, 121,
601 121, 121, 122, 122, 122, 122, 122, 122,
602 122, 122, 122, 122, 122, 122, 121, 121,
603 121, 120, 120, 120, 119, 119, 118, 118,
604 117, 116, 116, 115, 114, 113, 113, 112,
605 111, 110, 109, 108, 107, 106, 105, 104,
606 103, 102, 100, 99, 98, 97, 95, 94,
607 93, 91, 90, 88, 87, 85, 84, 82,
608 80, 79, 77, 76, 74, 72, 70, 69,
609 67, 65, 63, 61, 60, 58, 56, 54,
610 52, 50, 48, 46, 44, 42, 40, 38,
611 36, 34, 32, 30, 28, 26, 24, 22,
612 19, 17, 15, 13, 11, 9, 7, 5,
613 2, 0, -1, -3, -5, -7, -9, -12,
614 -14, -16, -18, -20, -22, -24, -26, -28,
615 -31, -33, -35, -37, -39, -41, -43, -45,
616 -47, -49, -51, -53, -54, -56, -58, -60,
617 -62, -64, -66, -67, -69, -71, -73, -74,
618 -76, -78, -79, -81, -83, -84, -86, -87,
619 -89, -90, -92, -93, -94, -96, -97, -98,
620 -99, -101, -102, -103, -104, -105, -106, -107,
621 -108, -109, -110, -111, -112, -113, -114, -114,
622 -115, -116, -117, -117, -118, -118, -119, -119,
623 -120, -120, -120, -121, -121, -121, -122, -122,
624 -122, -122, -122, -122, -122, -122, -122, -122,
625 -122, -122, -121, -121, -121, -120, -120, -120,
626 -119, -119, -118, -118, -117, -116, -116, -115,
627 -114, -113, -113, -112, -111, -110, -109, -108,
628 -107, -106, -105, -104, -103, -102, -100, -99,
629 -98, -97, -95, -94, -93, -91, -90, -88,
630 -87, -85, -84, -82, -80, -79, -77, -76,
631 -74, -72, -70, -69, -67, -65, -63, -61,
632 -60, -58, -56, -54, -52, -50, -48, -46,
633 -44, -42, -40, -38, -36, -34, -32, -30,
634 -28, -26, -24, -22, -19, -17, -15, -13,
635 -11, -9, -7, -5, -2, 0, 1, 3,
636 5, 7, 9, 12, 14, 16, 18, 20,
637 22, 24, 26, 28, 31, 33, 35, 37,
638 39, 41, 43, 45, 47, 49, 51, 53,
639 54, 56, 58, 60, 62, 64, 66, 67,
640 69, 71, 73, 74, 76, 78, 79, 81,
641 83, 84, 86, 87, 89, 90, 92, 93,
642 94, 96, 97, 98, 99, 101, 102, 103,
643 104, 105, 106, 107, 108, 109, 110, 111, 112
644};
645
646static const int hsv_blue_y[] = {
647 -11, -13, -15, -17, -19, -21, -23, -25,
648 -27, -29, -31, -33, -35, -37, -39, -41,
649 -43, -45, -46, -48, -50, -52, -54, -55,
650 -57, -59, -61, -62, -64, -66, -67, -69,
651 -71, -72, -74, -75, -77, -78, -80, -81,
652 -83, -84, -86, -87, -88, -90, -91, -92,
653 -93, -95, -96, -97, -98, -99, -100, -101,
654 -102, -103, -104, -105, -106, -106, -107, -108,
655 -109, -109, -110, -111, -111, -112, -112, -113,
656 -113, -114, -114, -114, -115, -115, -115, -115,
657 -116, -116, -116, -116, -116, -116, -116, -116,
658 -116, -115, -115, -115, -115, -114, -114, -114,
659 -113, -113, -112, -112, -111, -111, -110, -110,
660 -109, -108, -108, -107, -106, -105, -104, -103,
661 -102, -101, -100, -99, -98, -97, -96, -95,
662 -94, -93, -91, -90, -89, -88, -86, -85,
663 -84, -82, -81, -79, -78, -76, -75, -73,
664 -71, -70, -68, -67, -65, -63, -62, -60,
665 -58, -56, -55, -53, -51, -49, -47, -45,
666 -44, -42, -40, -38, -36, -34, -32, -30,
667 -28, -26, -24, -22, -20, -18, -16, -14,
668 -12, -10, -8, -6, -4, -2, 0, 1,
669 3, 5, 7, 9, 11, 13, 15, 17,
670 19, 21, 23, 25, 27, 29, 31, 33,
671 35, 37, 39, 41, 43, 45, 46, 48,
672 50, 52, 54, 55, 57, 59, 61, 62,
673 64, 66, 67, 69, 71, 72, 74, 75,
674 77, 78, 80, 81, 83, 84, 86, 87,
675 88, 90, 91, 92, 93, 95, 96, 97,
676 98, 99, 100, 101, 102, 103, 104, 105,
677 106, 106, 107, 108, 109, 109, 110, 111,
678 111, 112, 112, 113, 113, 114, 114, 114,
679 115, 115, 115, 115, 116, 116, 116, 116,
680 116, 116, 116, 116, 116, 115, 115, 115,
681 115, 114, 114, 114, 113, 113, 112, 112,
682 111, 111, 110, 110, 109, 108, 108, 107,
683 106, 105, 104, 103, 102, 101, 100, 99,
684 98, 97, 96, 95, 94, 93, 91, 90,
685 89, 88, 86, 85, 84, 82, 81, 79,
686 78, 76, 75, 73, 71, 70, 68, 67,
687 65, 63, 62, 60, 58, 56, 55, 53,
688 51, 49, 47, 45, 44, 42, 40, 38,
689 36, 34, 32, 30, 28, 26, 24, 22,
690 20, 18, 16, 14, 12, 10, 8, 6,
691 4, 2, 0, -1, -3, -5, -7, -9, -11
692};
693
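/*
 * V4L2 chip idents, indexed by the SENSOR_* value kept in sd->sensor
 * (see sd_chip_ident() below), so the entries must stay in the same
 * order as the sensor enum.
 */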
694static u16 i2c_ident[] = {
695 V4L2_IDENT_OV9650,
696 V4L2_IDENT_OV9655,
697 V4L2_IDENT_SOI968,
698 V4L2_IDENT_OV7660,
699 V4L2_IDENT_OV7670,
700 V4L2_IDENT_MT9V011,
701 V4L2_IDENT_MT9V111,
702 V4L2_IDENT_MT9V112,
703 V4L2_IDENT_MT9M001C12ST,
704 V4L2_IDENT_MT9M111,
705 V4L2_IDENT_HV7131R,
706};
707
708static u16 bridge_init[][2] = {
709 {0x1000, 0x78}, {0x1001, 0x40}, {0x1002, 0x1c},
710 {0x1020, 0x80}, {0x1061, 0x01}, {0x1067, 0x40},
711 {0x1068, 0x30}, {0x1069, 0x20}, {0x106a, 0x10},
712 {0x106b, 0x08}, {0x1188, 0x87}, {0x11a1, 0x00},
713 {0x11a2, 0x00}, {0x11a3, 0x6a}, {0x11a4, 0x50},
714 {0x11ab, 0x00}, {0x11ac, 0x00}, {0x11ad, 0x50},
715 {0x11ae, 0x3c}, {0x118a, 0x04}, {0x0395, 0x04},
716 {0x11b8, 0x3a}, {0x118b, 0x0e}, {0x10f7, 0x05},
717 {0x10f8, 0x14}, {0x10fa, 0xff}, {0x10f9, 0x00},
718 {0x11ba, 0x0a}, {0x11a5, 0x2d}, {0x11a6, 0x2d},
719 {0x11a7, 0x3a}, {0x11a8, 0x05}, {0x11a9, 0x04},
720 {0x11aa, 0x3f}, {0x11af, 0x28}, {0x11b0, 0xd8},
721 {0x11b1, 0x14}, {0x11b2, 0xec}, {0x11b3, 0x32},
722 {0x11b4, 0xdd}, {0x11b5, 0x32}, {0x11b6, 0xdd},
723 {0x10e0, 0x2c}, {0x11bc, 0x40}, {0x11bd, 0x01},
724 {0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f},
725 {0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f},
726 {0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01},
727 {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80}
728};
729
730/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */
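/*
 * Worked example of the formula above: 0x30 has bit[4] and bit[5] set and
 * bit[3:0] = 0, so gain = 1 * 2 * 2 * 1 = 4x; 0x12 gives
 * (2/16 + 1) * 2 = 2.25x, matching the table entries below.
 */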
731static u8 ov_gain[] = {
732 0x00 /* 1x */, 0x04 /* 1.25x */, 0x08 /* 1.5x */, 0x0c /* 1.75x */,
733 0x10 /* 2x */, 0x12 /* 2.25x */, 0x14 /* 2.5x */, 0x16 /* 2.75x */,
734 0x18 /* 3x */, 0x1a /* 3.25x */, 0x1c /* 3.5x */, 0x1e /* 3.75x */,
735 0x30 /* 4x */, 0x31 /* 4.25x */, 0x32 /* 4.5x */, 0x33 /* 4.75x */,
736 0x34 /* 5x */, 0x35 /* 5.25x */, 0x36 /* 5.5x */, 0x37 /* 5.75x */,
737 0x38 /* 6x */, 0x39 /* 6.25x */, 0x3a /* 6.5x */, 0x3b /* 6.75x */,
738 0x3c /* 7x */, 0x3d /* 7.25x */, 0x3e /* 7.5x */, 0x3f /* 7.75x */,
739 0x70 /* 8x */
740};
741
742/* Gain = (bit[8] + 1) * (bit[7] + 1) * (bit[6:0] * 0.03125) */
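/*
 * Example: 0x00a0 has bit[7] set and bit[6:0] = 0x20, giving
 * 1 * 2 * (32 * 0.03125) = 2x; 0x01c0 sets bit[8] and bit[7] with
 * bit[6:0] = 0x40, giving 2 * 2 * (64 * 0.03125) = 8x.
 */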
743static u16 micron1_gain[] = {
744 /* 1x 1.25x 1.5x 1.75x */
745 0x0020, 0x0028, 0x0030, 0x0038,
746 /* 2x 2.25x 2.5x 2.75x */
747 0x00a0, 0x00a4, 0x00a8, 0x00ac,
748 /* 3x 3.25x 3.5x 3.75x */
749 0x00b0, 0x00b4, 0x00b8, 0x00bc,
750 /* 4x 4.25x 4.5x 4.75x */
751 0x00c0, 0x00c4, 0x00c8, 0x00cc,
752 /* 5x 5.25x 5.5x 5.75x */
753 0x00d0, 0x00d4, 0x00d8, 0x00dc,
754 /* 6x 6.25x 6.5x 6.75x */
755 0x00e0, 0x00e4, 0x00e8, 0x00ec,
756 /* 7x 7.25x 7.5x 7.75x */
757 0x00f0, 0x00f4, 0x00f8, 0x00fc,
758 /* 8x */
759 0x01c0
760};
761
762/* mt9m001 sensor uses a different gain formula than other micron sensors */
763/* Gain = (bit[6] + 1) * (bit[5-0] * 0.125) */
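/*
 * Example: 0x0051 has bit[6] set and bit[5:0] = 0x11 (17), giving
 * 2 * (17 * 0.125) = 4.25x.
 */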
764static u16 micron2_gain[] = {
765 /* 1x 1.25x 1.5x 1.75x */
766 0x0008, 0x000a, 0x000c, 0x000e,
767 /* 2x 2.25x 2.5x 2.75x */
768 0x0010, 0x0012, 0x0014, 0x0016,
769 /* 3x 3.25x 3.5x 3.75x */
770 0x0018, 0x001a, 0x001c, 0x001e,
771 /* 4x 4.25x 4.5x 4.75x */
772 0x0020, 0x0051, 0x0052, 0x0053,
773 /* 5x 5.25x 5.5x 5.75x */
774 0x0054, 0x0055, 0x0056, 0x0057,
775 /* 6x 6.25x 6.5x 6.75x */
776 0x0058, 0x0059, 0x005a, 0x005b,
777 /* 7x 7.25x 7.5x 7.75x */
778 0x005c, 0x005d, 0x005e, 0x005f,
779 /* 8x */
780 0x0060
781};
782
783/* Gain = .5 + bit[7:0] / 16 */
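/*
 * Example: 0x08 gives 0.5 + 8/16 = 1x, 0x78 gives 0.5 + 120/16 = 8x.
 */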
784static u8 hv7131r_gain[] = {
785 0x08 /* 1x */, 0x0c /* 1.25x */, 0x10 /* 1.5x */, 0x14 /* 1.75x */,
786 0x18 /* 2x */, 0x1c /* 2.25x */, 0x20 /* 2.5x */, 0x24 /* 2.75x */,
787 0x28 /* 3x */, 0x2c /* 3.25x */, 0x30 /* 3.5x */, 0x34 /* 3.75x */,
788 0x38 /* 4x */, 0x3c /* 4.25x */, 0x40 /* 4.5x */, 0x44 /* 4.75x */,
789 0x48 /* 5x */, 0x4c /* 5.25x */, 0x50 /* 5.5x */, 0x54 /* 5.75x */,
790 0x58 /* 6x */, 0x5c /* 6.25x */, 0x60 /* 6.5x */, 0x64 /* 6.75x */,
791 0x68 /* 7x */, 0x6c /* 7.25x */, 0x70 /* 7.5x */, 0x74 /* 7.75x */,
792 0x78 /* 8x */
793};
794
795static u8 soi968_init[][2] = {
796 {0x12, 0x80}, {0x0c, 0x00}, {0x0f, 0x1f},
797 {0x11, 0x80}, {0x38, 0x52}, {0x1e, 0x00},
798 {0x33, 0x08}, {0x35, 0x8c}, {0x36, 0x0c},
799 {0x37, 0x04}, {0x45, 0x04}, {0x47, 0xff},
800 {0x3e, 0x00}, {0x3f, 0x00}, {0x3b, 0x20},
801 {0x3a, 0x96}, {0x3d, 0x0a}, {0x14, 0x8e},
802 {0x13, 0x8a}, {0x12, 0x40}, {0x17, 0x13},
803 {0x18, 0x63}, {0x19, 0x01}, {0x1a, 0x79},
804 {0x32, 0x24}, {0x03, 0x00}, {0x11, 0x40},
805 {0x2a, 0x10}, {0x2b, 0xe0}, {0x10, 0x32},
806 {0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80},
807};
808
809static u8 ov7660_init[][2] = {
810 {0x0e, 0x80}, {0x0d, 0x08}, {0x0f, 0xc3},
811 {0x04, 0xc3}, {0x10, 0x40}, {0x11, 0x40},
812 {0x12, 0x05}, {0x13, 0xba}, {0x14, 0x2a},
813 {0x37, 0x0f}, {0x38, 0x02}, {0x39, 0x43},
814 {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0xf6},
815 {0x2e, 0x0b}, {0x01, 0x78}, {0x02, 0x50},
816};
817
818static u8 ov7670_init[][2] = {
819 {0x12, 0x80}, {0x11, 0x80}, {0x3a, 0x04}, {0x12, 0x01},
820 {0x32, 0xb6}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00},
821 {0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0},
822 {0xa2, 0x02}, {0x13, 0xe0}, {0x00, 0x00}, {0x10, 0x00},
823 {0x0d, 0x40}, {0x14, 0x28}, {0xa5, 0x05}, {0xab, 0x07},
824 {0x24, 0x95}, {0x25, 0x33}, {0x26, 0xe3}, {0x9f, 0x75},
825 {0xa0, 0x65}, {0xa1, 0x0b}, {0xa6, 0xd8}, {0xa7, 0xd8},
826 {0xa8, 0xf0}, {0xa9, 0x90}, {0xaa, 0x94}, {0x13, 0xe5},
827 {0x0e, 0x61}, {0x0f, 0x4b}, {0x16, 0x02}, {0x1e, 0x27},
828 {0x21, 0x02}, {0x22, 0x91}, {0x29, 0x07}, {0x33, 0x0b},
829 {0x35, 0x0b}, {0x37, 0x1d}, {0x38, 0x71}, {0x39, 0x2a},
830 {0x3c, 0x78}, {0x4d, 0x40}, {0x4e, 0x20}, {0x69, 0x00},
831 {0x74, 0x19}, {0x8d, 0x4f}, {0x8e, 0x00}, {0x8f, 0x00},
832 {0x90, 0x00}, {0x91, 0x00}, {0x96, 0x00}, {0x9a, 0x80},
833 {0xb0, 0x84}, {0xb1, 0x0c}, {0xb2, 0x0e}, {0xb3, 0x82},
834 {0xb8, 0x0a}, {0x43, 0x0a}, {0x44, 0xf0}, {0x45, 0x20},
835 {0x46, 0x7d}, {0x47, 0x29}, {0x48, 0x4a}, {0x59, 0x8c},
836 {0x5a, 0xa5}, {0x5b, 0xde}, {0x5c, 0x96}, {0x5d, 0x66},
837 {0x5e, 0x10}, {0x6c, 0x0a}, {0x6d, 0x55}, {0x6e, 0x11},
838 {0x6f, 0x9e}, {0x6a, 0x40}, {0x01, 0x40}, {0x02, 0x40},
839 {0x13, 0xe7}, {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x02},
840 {0x52, 0x1d}, {0x53, 0x56}, {0x54, 0x73}, {0x55, 0x0a},
841 {0x56, 0x55}, {0x57, 0x80}, {0x58, 0x9e}, {0x41, 0x08},
842 {0x3f, 0x02}, {0x75, 0x03}, {0x76, 0x63}, {0x4c, 0x04},
843 {0x77, 0x06}, {0x3d, 0x02}, {0x4b, 0x09}, {0xc9, 0x30},
844 {0x41, 0x08}, {0x56, 0x48}, {0x34, 0x11}, {0xa4, 0x88},
845 {0x96, 0x00}, {0x97, 0x30}, {0x98, 0x20}, {0x99, 0x30},
846 {0x9a, 0x84}, {0x9b, 0x29}, {0x9c, 0x03}, {0x9d, 0x99},
847 {0x9e, 0x7f}, {0x78, 0x04}, {0x79, 0x01}, {0xc8, 0xf0},
848 {0x79, 0x0f}, {0xc8, 0x00}, {0x79, 0x10}, {0xc8, 0x7e},
849 {0x79, 0x0a}, {0xc8, 0x80}, {0x79, 0x0b}, {0xc8, 0x01},
850 {0x79, 0x0c}, {0xc8, 0x0f}, {0x79, 0x0d}, {0xc8, 0x20},
851 {0x79, 0x09}, {0xc8, 0x80}, {0x79, 0x02}, {0xc8, 0xc0},
852 {0x79, 0x03}, {0xc8, 0x40}, {0x79, 0x05}, {0xc8, 0x30},
853 {0x79, 0x26}, {0x62, 0x20}, {0x63, 0x00}, {0x64, 0x06},
854 {0x65, 0x00}, {0x66, 0x05}, {0x94, 0x05}, {0x95, 0x0a},
855 {0x17, 0x13}, {0x18, 0x01}, {0x19, 0x02}, {0x1a, 0x7a},
856 {0x46, 0x59}, {0x47, 0x30}, {0x58, 0x9a}, {0x59, 0x84},
857 {0x5a, 0x91}, {0x5b, 0x57}, {0x5c, 0x75}, {0x5d, 0x6d},
858 {0x5e, 0x13}, {0x64, 0x07}, {0x94, 0x07}, {0x95, 0x0d},
859 {0xa6, 0xdf}, {0xa7, 0xdf}, {0x48, 0x4d}, {0x51, 0x00},
860 {0x6b, 0x0a}, {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00},
861 {0x92, 0x00}, {0x93, 0x00}, {0x55, 0x0a}, {0x56, 0x60},
862 {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d},
863 {0x53, 0x56}, {0x54, 0x73}, {0x58, 0x9a}, {0x4f, 0x6e},
864 {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, {0x53, 0x56},
865 {0x54, 0x73}, {0x58, 0x9a}, {0x3f, 0x01}, {0x7b, 0x03},
866 {0x7c, 0x09}, {0x7d, 0x16}, {0x7e, 0x38}, {0x7f, 0x47},
867 {0x80, 0x53}, {0x81, 0x5e}, {0x82, 0x6a}, {0x83, 0x74},
868 {0x84, 0x80}, {0x85, 0x8c}, {0x86, 0x9b}, {0x87, 0xb2},
869 {0x88, 0xcc}, {0x89, 0xe5}, {0x7a, 0x24}, {0x3b, 0x00},
870 {0x9f, 0x76}, {0xa0, 0x65}, {0x13, 0xe2}, {0x6b, 0x0a},
871 {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, {0x92, 0x00},
872 {0x93, 0x00},
873};
874
875static u8 ov9650_init[][2] = {
876 {0x12, 0x80}, {0x00, 0x00}, {0x01, 0x78},
877 {0x02, 0x78}, {0x03, 0x36}, {0x04, 0x03},
878 {0x05, 0x00}, {0x06, 0x00}, {0x08, 0x00},
879 {0x09, 0x01}, {0x0c, 0x00}, {0x0d, 0x00},
880 {0x0e, 0xa0}, {0x0f, 0x52}, {0x10, 0x7c},
881 {0x11, 0x80}, {0x12, 0x45}, {0x13, 0xc2},
882 {0x14, 0x2e}, {0x15, 0x00}, {0x16, 0x07},
883 {0x17, 0x24}, {0x18, 0xc5}, {0x19, 0x00},
884 {0x1a, 0x3c}, {0x1b, 0x00}, {0x1e, 0x04},
885 {0x1f, 0x00}, {0x24, 0x78}, {0x25, 0x68},
886 {0x26, 0xd4}, {0x27, 0x80}, {0x28, 0x80},
887 {0x29, 0x30}, {0x2a, 0x00}, {0x2b, 0x00},
888 {0x2c, 0x80}, {0x2d, 0x00}, {0x2e, 0x00},
889 {0x2f, 0x00}, {0x30, 0x08}, {0x31, 0x30},
890 {0x32, 0x84}, {0x33, 0xe2}, {0x34, 0xbf},
891 {0x35, 0x81}, {0x36, 0xf9}, {0x37, 0x00},
892 {0x38, 0x93}, {0x39, 0x50}, {0x3a, 0x01},
893 {0x3b, 0x01}, {0x3c, 0x73}, {0x3d, 0x19},
894 {0x3e, 0x0b}, {0x3f, 0x80}, {0x40, 0xc1},
895 {0x41, 0x00}, {0x42, 0x08}, {0x67, 0x80},
896 {0x68, 0x80}, {0x69, 0x40}, {0x6a, 0x00},
897 {0x6b, 0x0a}, {0x8b, 0x06}, {0x8c, 0x20},
898 {0x8d, 0x00}, {0x8e, 0x00}, {0x8f, 0xdf},
899 {0x92, 0x00}, {0x93, 0x00}, {0x94, 0x88},
900 {0x95, 0x88}, {0x96, 0x04}, {0xa1, 0x00},
901 {0xa5, 0x80}, {0xa8, 0x80}, {0xa9, 0xb8},
902 {0xaa, 0x92}, {0xab, 0x0a},
903};
904
905static u8 ov9655_init[][2] = {
906 {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61},
907 {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24},
908 {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08},
909 {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf},
910 {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12},
911 {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c},
912 {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40},
913 {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a},
914 {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc},
915 {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04},
916 {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02},
917 {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c},
918 {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60},
919 {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04},
920 {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80},
921 {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf},
922 {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b},
923 {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01},
924 {0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0},
925 {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61},
926 {0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a},
927 {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 0x01},
928 {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10},
929 {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04},
930 {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00},
931 {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80},
932 {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d},
933 {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14},
934 {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf},
935 {0x11, 0x80}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00},
936 {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00},
937 {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03},
938 {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03},
939 {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13},
940};
941
942static u16 mt9v112_init[][2] = {
943 {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0020},
944 {0x34, 0xc019}, {0x0a, 0x0011}, {0x0b, 0x000b},
945 {0x20, 0x0703}, {0x35, 0x2022}, {0xf0, 0x0001},
946 {0x05, 0x0000}, {0x06, 0x340c}, {0x3b, 0x042a},
947 {0x3c, 0x0400}, {0xf0, 0x0002}, {0x2e, 0x0c58},
948 {0x5b, 0x0001}, {0xc8, 0x9f0b}, {0xf0, 0x0001},
949 {0x9b, 0x5300}, {0xf0, 0x0000}, {0x2b, 0x0020},
950 {0x2c, 0x002a}, {0x2d, 0x0032}, {0x2e, 0x0020},
951 {0x09, 0x01dc}, {0x01, 0x000c}, {0x02, 0x0020},
952 {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c},
953 {0x05, 0x0098}, {0x20, 0x0703}, {0x09, 0x01f2},
954 {0x2b, 0x00a0}, {0x2c, 0x00a0}, {0x2d, 0x00a0},
955 {0x2e, 0x00a0}, {0x01, 0x000c}, {0x02, 0x0020},
956 {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c},
957 {0x05, 0x0098}, {0x09, 0x01c1}, {0x2b, 0x00ae},
958 {0x2c, 0x00ae}, {0x2d, 0x00ae}, {0x2e, 0x00ae},
959};
960
961static u16 mt9v111_init[][2] = {
962 {0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000},
963 {0x01, 0x0001}, {0x02, 0x0016}, {0x03, 0x01e1},
964 {0x04, 0x0281}, {0x05, 0x0004}, {0x07, 0x3002},
965 {0x21, 0x0000}, {0x25, 0x4024}, {0x26, 0xff03},
966 {0x27, 0xff10}, {0x2b, 0x7828}, {0x2c, 0xb43c},
967 {0x2d, 0xf0a0}, {0x2e, 0x0c64}, {0x2f, 0x0064},
968 {0x67, 0x4010}, {0x06, 0x301e}, {0x08, 0x0480},
969 {0x01, 0x0004}, {0x02, 0x0016}, {0x03, 0x01e6},
970 {0x04, 0x0286}, {0x05, 0x0004}, {0x06, 0x0000},
971 {0x07, 0x3002}, {0x08, 0x0008}, {0x0c, 0x0000},
972 {0x0d, 0x0000}, {0x0e, 0x0000}, {0x0f, 0x0000},
973 {0x10, 0x0000}, {0x11, 0x0000}, {0x12, 0x00b0},
974 {0x13, 0x007c}, {0x14, 0x0000}, {0x15, 0x0000},
975 {0x16, 0x0000}, {0x17, 0x0000}, {0x18, 0x0000},
976 {0x19, 0x0000}, {0x1a, 0x0000}, {0x1b, 0x0000},
977 {0x1c, 0x0000}, {0x1d, 0x0000}, {0x30, 0x0000},
978 {0x30, 0x0005}, {0x31, 0x0000}, {0x02, 0x0016},
979 {0x03, 0x01e1}, {0x04, 0x0281}, {0x05, 0x0004},
980 {0x06, 0x0000}, {0x07, 0x3002}, {0x06, 0x002d},
981 {0x05, 0x0004}, {0x09, 0x0064}, {0x2b, 0x00a0},
982 {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x00a0},
983 {0x02, 0x0016}, {0x03, 0x01e1}, {0x04, 0x0281},
984 {0x05, 0x0004}, {0x06, 0x002d}, {0x07, 0x3002},
985 {0x0e, 0x0008}, {0x06, 0x002d}, {0x05, 0x0004},
986};
987
988static u16 mt9v011_init[][2] = {
989 {0x07, 0x0002}, {0x0d, 0x0001}, {0x0d, 0x0000},
990 {0x01, 0x0008}, {0x02, 0x0016}, {0x03, 0x01e1},
991 {0x04, 0x0281}, {0x05, 0x0083}, {0x06, 0x0006},
992 {0x0d, 0x0002}, {0x0a, 0x0000}, {0x0b, 0x0000},
993 {0x0c, 0x0000}, {0x0d, 0x0000}, {0x0e, 0x0000},
994 {0x0f, 0x0000}, {0x10, 0x0000}, {0x11, 0x0000},
995 {0x12, 0x0000}, {0x13, 0x0000}, {0x14, 0x0000},
996 {0x15, 0x0000}, {0x16, 0x0000}, {0x17, 0x0000},
997 {0x18, 0x0000}, {0x19, 0x0000}, {0x1a, 0x0000},
998 {0x1b, 0x0000}, {0x1c, 0x0000}, {0x1d, 0x0000},
999 {0x32, 0x0000}, {0x20, 0x1101}, {0x21, 0x0000},
1000 {0x22, 0x0000}, {0x23, 0x0000}, {0x24, 0x0000},
1001 {0x25, 0x0000}, {0x26, 0x0000}, {0x27, 0x0024},
1002 {0x2f, 0xf7b0}, {0x30, 0x0005}, {0x31, 0x0000},
1003 {0x32, 0x0000}, {0x33, 0x0000}, {0x34, 0x0100},
1004 {0x3d, 0x068f}, {0x40, 0x01e0}, {0x41, 0x00d1},
1005 {0x44, 0x0082}, {0x5a, 0x0000}, {0x5b, 0x0000},
1006 {0x5c, 0x0000}, {0x5d, 0x0000}, {0x5e, 0x0000},
1007 {0x5f, 0xa31d}, {0x62, 0x0611}, {0x0a, 0x0000},
1008 {0x06, 0x0029}, {0x05, 0x0009}, {0x20, 0x1101},
1009 {0x20, 0x1101}, {0x09, 0x0064}, {0x07, 0x0003},
1010 {0x2b, 0x0033}, {0x2c, 0x00a0}, {0x2d, 0x00a0},
1011 {0x2e, 0x0033}, {0x07, 0x0002}, {0x06, 0x0000},
1012 {0x06, 0x0029}, {0x05, 0x0009},
1013};
1014
1015static u16 mt9m001_init[][2] = {
1016 {0x0d, 0x0001}, {0x0d, 0x0000}, {0x01, 0x000e},
1017 {0x02, 0x0014}, {0x03, 0x03c1}, {0x04, 0x0501},
1018 {0x05, 0x0083}, {0x06, 0x0006}, {0x0d, 0x0002},
1019 {0x0a, 0x0000}, {0x0c, 0x0000}, {0x11, 0x0000},
1020 {0x1e, 0x8000}, {0x5f, 0x8904}, {0x60, 0x0000},
1021 {0x61, 0x0000}, {0x62, 0x0498}, {0x63, 0x0000},
1022 {0x64, 0x0000}, {0x20, 0x111d}, {0x06, 0x00f2},
1023 {0x05, 0x0013}, {0x09, 0x10f2}, {0x07, 0x0003},
1024 {0x2b, 0x002a}, {0x2d, 0x002a}, {0x2c, 0x002a},
1025 {0x2e, 0x0029}, {0x07, 0x0002},
1026};
1027
1028static u16 mt9m111_init[][2] = {
1029 {0xf0, 0x0000}, {0x0d, 0x0008}, {0x0d, 0x0009},
1030 {0x0d, 0x0008}, {0xf0, 0x0001}, {0x3a, 0x4300},
1031 {0x9b, 0x4300}, {0xa1, 0x0280}, {0xa4, 0x0200},
1032 {0x06, 0x308e}, {0xf0, 0x0000},
1033};
1034
1035static u8 hv7131r_init[][2] = {
1036 {0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08},
1037 {0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0},
1038 {0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08},
1039 {0x01, 0x08}, {0x01, 0x08}, {0x25, 0x07},
1040 {0x26, 0xc3}, {0x27, 0x50}, {0x30, 0x62},
1041 {0x31, 0x10}, {0x32, 0x06}, {0x33, 0x10},
1042 {0x20, 0x00}, {0x21, 0xd0}, {0x22, 0x00},
1043 {0x23, 0x09}, {0x01, 0x08},
1044};
1045
1046int reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
1047{
1048 struct usb_device *dev = gspca_dev->dev;
1049 int result;
1050 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
1051 0x00,
1052 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1053 reg,
1054 0x00,
1055 gspca_dev->usb_buf,
1056 length,
1057 500);
1058 if (unlikely(result < 0 || result != length)) {
1059 err("Read register failed 0x%02X", reg);
1060 return -EIO;
1061 }
1062 return 0;
1063}
1064
1065int reg_w(struct gspca_dev *gspca_dev, u16 reg, const u8 *buffer, int length)
1066{
1067 struct usb_device *dev = gspca_dev->dev;
1068 int result;
1069 memcpy(gspca_dev->usb_buf, buffer, length);
1070 result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1071 0x08,
1072 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1073 reg,
1074 0x00,
1075 gspca_dev->usb_buf,
1076 length,
1077 500);
1078 if (unlikely(result < 0 || result != length)) {
1079 err("Write register failed index 0x%02X", reg);
1080 return -EIO;
1081 }
1082 return 0;
1083}
1084
1085int reg_w1(struct gspca_dev *gspca_dev, u16 reg, const u8 value)
1086{
1087 u8 data[1] = {value};
1088 return reg_w(gspca_dev, reg, data, 1);
1089}
1090
1091int i2c_w(struct gspca_dev *gspca_dev, const u8 *buffer)
1092{
1093 int i;
1094 reg_w(gspca_dev, 0x10c0, buffer, 8);
1095 for (i = 0; i < 5; i++) {
1096 reg_r(gspca_dev, 0x10c0, 1);
1097 if (gspca_dev->usb_buf[0] & 0x04) {
1098 if (gspca_dev->usb_buf[0] & 0x08)
1099 return -1;
1100 return 0;
1101 }
1102 msleep(1);
1103 }
1104 return -1;
1105}
1106
1107int i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
1108{
1109 struct sd *sd = (struct sd *) gspca_dev;
1110
1111 u8 row[8];
1112
1113 /*
1114 * from the point of view of the bridge, the length
1115 * includes the address
1116 */
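	/*
	 * Row layout (inferred from this driver, not from a datasheet):
	 * row[0] holds command flags with that length in bits 6:4,
	 * row[1] is the sensor I2C address, row[2] the register,
	 * row[3..6] the data bytes; row[7] = 0x10 appears to start
	 * the transfer.
	 */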
1117 row[0] = 0x81 | (2 << 4);
1118 row[1] = sd->i2c_addr;
1119 row[2] = reg;
1120 row[3] = val;
1121 row[4] = 0x00;
1122 row[5] = 0x00;
1123 row[6] = 0x00;
1124 row[7] = 0x10;
1125
1126 return i2c_w(gspca_dev, row);
1127}
1128
1129int i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val)
1130{
1131 struct sd *sd = (struct sd *) gspca_dev;
1132 u8 row[8];
1133
1134 /*
1135 * from the point of view of the bridge, the length
1136 * includes the address
1137 */
1138 row[0] = 0x81 | (3 << 4);
1139 row[1] = sd->i2c_addr;
1140 row[2] = reg;
1141 row[3] = (val >> 8) & 0xff;
1142 row[4] = val & 0xff;
1143 row[5] = 0x00;
1144 row[6] = 0x00;
1145 row[7] = 0x10;
1146
1147 return i2c_w(gspca_dev, row);
1148}
1149
1150int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
1151{
1152 struct sd *sd = (struct sd *) gspca_dev;
1153 u8 row[8];
1154
1155 row[0] = 0x81 | 0x10;
1156 row[1] = sd->i2c_addr;
1157 row[2] = reg;
1158 row[3] = 0;
1159 row[4] = 0;
1160 row[5] = 0;
1161 row[6] = 0;
1162 row[7] = 0x10;
1163 reg_w(gspca_dev, 0x10c0, row, 8);
1164 msleep(1);
1165 row[0] = 0x81 | (2 << 4) | 0x02;
1166 row[2] = 0;
1167 reg_w(gspca_dev, 0x10c0, row, 8);
1168 msleep(1);
1169 reg_r(gspca_dev, 0x10c2, 5);
1170 *val = gspca_dev->usb_buf[3];
1171 return 0;
1172}
1173
1174int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
1175{
1176 struct sd *sd = (struct sd *) gspca_dev;
1177 u8 row[8];
1178
1179 row[0] = 0x81 | 0x10;
1180 row[1] = sd->i2c_addr;
1181 row[2] = reg;
1182 row[3] = 0;
1183 row[4] = 0;
1184 row[5] = 0;
1185 row[6] = 0;
1186 row[7] = 0x10;
1187 reg_w(gspca_dev, 0x10c0, row, 8);
1188 msleep(1);
1189 row[0] = 0x81 | (3 << 4) | 0x02;
1190 row[2] = 0;
1191 reg_w(gspca_dev, 0x10c0, row, 8);
1192 msleep(1);
1193 reg_r(gspca_dev, 0x10c2, 5);
1194 *val = (gspca_dev->usb_buf[2] << 8) | gspca_dev->usb_buf[3];
1195 return 0;
1196}
1197
1198static int ov9650_init_sensor(struct gspca_dev *gspca_dev)
1199{
1200 int i;
1201 struct sd *sd = (struct sd *) gspca_dev;
1202
1203 for (i = 0; i < ARRAY_SIZE(ov9650_init); i++) {
1204 if (i2c_w1(gspca_dev, ov9650_init[i][0],
1205 ov9650_init[i][1]) < 0) {
1206 err("OV9650 sensor initialization failed");
1207 return -ENODEV;
1208 }
1209 }
1210 sd->hstart = 1;
1211 sd->vstart = 7;
1212 return 0;
1213}
1214
1215static int ov9655_init_sensor(struct gspca_dev *gspca_dev)
1216{
1217 int i;
1218 struct sd *sd = (struct sd *) gspca_dev;
1219
1220 for (i = 0; i < ARRAY_SIZE(ov9655_init); i++) {
1221 if (i2c_w1(gspca_dev, ov9655_init[i][0],
1222 ov9655_init[i][1]) < 0) {
1223 err("OV9655 sensor initialization failed");
1224 return -ENODEV;
1225 }
1226 }
1227 /* disable hflip and vflip */
1228 gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
1229 sd->hstart = 0;
1230 sd->vstart = 7;
1231 return 0;
1232}
1233
1234static int soi968_init_sensor(struct gspca_dev *gspca_dev)
1235{
1236 int i;
1237 struct sd *sd = (struct sd *) gspca_dev;
1238
1239 for (i = 0; i < ARRAY_SIZE(soi968_init); i++) {
1240 if (i2c_w1(gspca_dev, soi968_init[i][0],
1241 soi968_init[i][1]) < 0) {
1242 err("SOI968 sensor initialization failed");
1243 return -ENODEV;
1244 }
1245 }
1246 /* disable hflip and vflip */
1247 gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
1248 sd->hstart = 60;
1249 sd->vstart = 11;
1250 return 0;
1251}
1252
1253static int ov7660_init_sensor(struct gspca_dev *gspca_dev)
1254{
1255 int i;
1256 struct sd *sd = (struct sd *) gspca_dev;
1257
1258 for (i = 0; i < ARRAY_SIZE(ov7660_init); i++) {
1259 if (i2c_w1(gspca_dev, ov7660_init[i][0],
1260 ov7660_init[i][1]) < 0) {
1261 err("OV7660 sensor initialization failed");
1262 return -ENODEV;
1263 }
1264 }
1265 /* disable hflip and vflip */
1266 gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
1267 sd->hstart = 1;
1268 sd->vstart = 1;
1269 return 0;
1270}
1271
1272static int ov7670_init_sensor(struct gspca_dev *gspca_dev)
1273{
1274 int i;
1275 struct sd *sd = (struct sd *) gspca_dev;
1276
1277 for (i = 0; i < ARRAY_SIZE(ov7670_init); i++) {
1278 if (i2c_w1(gspca_dev, ov7670_init[i][0],
1279 ov7670_init[i][1]) < 0) {
1280 err("OV7670 sensor initialization failed");
1281 return -ENODEV;
1282 }
1283 }
1284 /* disable hflip and vflip */
1285 gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
1286 sd->hstart = 0;
1287 sd->vstart = 1;
1288 return 0;
1289}
1290
1291static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
1292{
1293 struct sd *sd = (struct sd *) gspca_dev;
1294 int i;
1295 u16 value;
1296 int ret;
1297
1298 sd->i2c_addr = 0x5d;
1299 ret = i2c_r2(gspca_dev, 0xff, &value);
1300 if ((ret == 0) && (value == 0x8243)) {
1301 for (i = 0; i < ARRAY_SIZE(mt9v011_init); i++) {
1302 if (i2c_w2(gspca_dev, mt9v011_init[i][0],
1303 mt9v011_init[i][1]) < 0) {
1304 err("MT9V011 sensor initialization failed");
1305 return -ENODEV;
1306 }
1307 }
1308 sd->hstart = 2;
1309 sd->vstart = 2;
1310 sd->sensor = SENSOR_MT9V011;
1311 info("MT9V011 sensor detected");
1312 return 0;
1313 }
1314
1315 sd->i2c_addr = 0x5c;
1316 i2c_w2(gspca_dev, 0x01, 0x0004);
1317 ret = i2c_r2(gspca_dev, 0xff, &value);
1318 if ((ret == 0) && (value == 0x823a)) {
1319 for (i = 0; i < ARRAY_SIZE(mt9v111_init); i++) {
1320 if (i2c_w2(gspca_dev, mt9v111_init[i][0],
1321 mt9v111_init[i][1]) < 0) {
1322 err("MT9V111 sensor initialization failed");
1323 return -ENODEV;
1324 }
1325 }
1326 sd->hstart = 2;
1327 sd->vstart = 2;
1328 sd->sensor = SENSOR_MT9V111;
1329 info("MT9V111 sensor detected");
1330 return 0;
1331 }
1332
1333 sd->i2c_addr = 0x5d;
1334 ret = i2c_w2(gspca_dev, 0xf0, 0x0000);
1335 if (ret < 0) {
1336 sd->i2c_addr = 0x48;
1337 i2c_w2(gspca_dev, 0xf0, 0x0000);
1338 }
1339 ret = i2c_r2(gspca_dev, 0x00, &value);
1340 if ((ret == 0) && (value == 0x1229)) {
1341 for (i = 0; i < ARRAY_SIZE(mt9v112_init); i++) {
1342 if (i2c_w2(gspca_dev, mt9v112_init[i][0],
1343 mt9v112_init[i][1]) < 0) {
1344 err("MT9V112 sensor initialization failed");
1345 return -ENODEV;
1346 }
1347 }
1348 sd->hstart = 6;
1349 sd->vstart = 2;
1350 sd->sensor = SENSOR_MT9V112;
1351 info("MT9V112 sensor detected");
1352 return 0;
1353 }
1354
1355 return -ENODEV;
1356}
1357
1358static int mt9m111_init_sensor(struct gspca_dev *gspca_dev)
1359{
1360 struct sd *sd = (struct sd *) gspca_dev;
1361 int i;
1362 for (i = 0; i < ARRAY_SIZE(mt9m111_init); i++) {
1363 if (i2c_w2(gspca_dev, mt9m111_init[i][0],
1364 mt9m111_init[i][1]) < 0) {
1365 err("MT9M111 sensor initialization failed");
1366 return -ENODEV;
1367 }
1368 }
1369 sd->hstart = 0;
1370 sd->vstart = 2;
1371 return 0;
1372}
1373
1374static int mt9m001_init_sensor(struct gspca_dev *gspca_dev)
1375{
1376 struct sd *sd = (struct sd *) gspca_dev;
1377 int i;
1378 for (i = 0; i < ARRAY_SIZE(mt9m001_init); i++) {
1379 if (i2c_w2(gspca_dev, mt9m001_init[i][0],
1380 mt9m001_init[i][1]) < 0) {
1381 err("MT9M001 sensor initialization failed");
1382 return -ENODEV;
1383 }
1384 }
1385 /* disable hflip and vflip */
1386 gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
1387 sd->hstart = 2;
1388 sd->vstart = 2;
1389 return 0;
1390}
1391
1392static int hv7131r_init_sensor(struct gspca_dev *gspca_dev)
1393{
1394 int i;
1395 struct sd *sd = (struct sd *) gspca_dev;
1396
1397 for (i = 0; i < ARRAY_SIZE(hv7131r_init); i++) {
1398 if (i2c_w1(gspca_dev, hv7131r_init[i][0],
1399 hv7131r_init[i][1]) < 0) {
 1400 err("HV7131R sensor initialization failed");
1401 return -ENODEV;
1402 }
1403 }
1404 sd->hstart = 0;
1405 sd->vstart = 1;
1406 return 0;
1407}
1408
1409#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
1410static int input_kthread(void *data)
1411{
1412 struct gspca_dev *gspca_dev = (struct gspca_dev *)data;
1413 struct sd *sd = (struct sd *) gspca_dev;
1414
1415 DECLARE_WAIT_QUEUE_HEAD(wait);
1416 set_freezable();
1417 for (;;) {
1418 if (kthread_should_stop())
1419 break;
1420
1421 if (reg_r(gspca_dev, 0x1005, 1) < 0)
1422 continue;
1423
1424 input_report_key(sd->input_dev,
1425 KEY_CAMERA,
1426 gspca_dev->usb_buf[0] & sd->input_gpio);
1427 input_sync(sd->input_dev);
1428
1429 wait_event_freezable_timeout(wait,
1430 kthread_should_stop(),
1431 msecs_to_jiffies(100));
1432 }
1433 return 0;
1434}
1435
1436
1437static int sn9c20x_input_init(struct gspca_dev *gspca_dev)
1438{
1439 struct sd *sd = (struct sd *) gspca_dev;
1440 if (sd->input_gpio == 0)
1441 return 0;
1442
1443 sd->input_dev = input_allocate_device();
1444 if (!sd->input_dev)
1445 return -ENOMEM;
1446
1447 sd->input_dev->name = "SN9C20X Webcam";
1448
1449 sd->input_dev->phys = kasprintf(GFP_KERNEL, "usb-%s-%s",
1450 gspca_dev->dev->bus->bus_name,
1451 gspca_dev->dev->devpath);
1452
 1453 if (!sd->input_dev->phys) {
	/* avoid leaking the allocated, not yet registered, input device */
	input_free_device(sd->input_dev);
	sd->input_dev = NULL;
 1454 return -ENOMEM;
 }
1455
1456 usb_to_input_id(gspca_dev->dev, &sd->input_dev->id);
1457 sd->input_dev->dev.parent = &gspca_dev->dev->dev;
1458
1459 set_bit(EV_KEY, sd->input_dev->evbit);
1460 set_bit(KEY_CAMERA, sd->input_dev->keybit);
1461
 1462 if (input_register_device(sd->input_dev)) {
	/* registration failed: free the phys string and the device */
	kfree(sd->input_dev->phys);
	input_free_device(sd->input_dev);
	sd->input_dev = NULL;
 1463 return -EINVAL;
 }
1464
1465 sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%d",
1466 gspca_dev->vdev.minor);
1467
1468 if (IS_ERR(sd->input_task))
1469 return -EINVAL;
1470
1471 return 0;
1472}
1473
1474static void sn9c20x_input_cleanup(struct gspca_dev *gspca_dev)
1475{
1476 struct sd *sd = (struct sd *) gspca_dev;
1477 if (sd->input_task != NULL && !IS_ERR(sd->input_task))
1478 kthread_stop(sd->input_task);
1479
1480 if (sd->input_dev != NULL) {
1481 input_unregister_device(sd->input_dev);
1482 kfree(sd->input_dev->phys);
1483 input_free_device(sd->input_dev);
1484 sd->input_dev = NULL;
1485 }
1486}
1487#endif
1488
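/*
 * Build the 21-byte colour matrix written to bridge register 0x10e1.
 * hue_index = 180 + sd->hue selects one of the 361 entries in the hsv_*
 * tables above; each coefficient is scaled by the saturation control and
 * stored as a 12-bit value split over a low byte and a high nibble.
 * (Descriptive summary of the code below, not taken from a datasheet.)
 */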
1489static int set_cmatrix(struct gspca_dev *gspca_dev)
1490{
1491 struct sd *sd = (struct sd *) gspca_dev;
1492 s32 hue_coord, hue_index = 180 + sd->hue;
1493 u8 cmatrix[21];
1494 memset(cmatrix, 0, 21);
1495
1496 cmatrix[2] = (sd->contrast * 0x25 / 0x100) + 0x26;
1497 cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25;
1498 cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25;
1499 cmatrix[18] = sd->brightness - 0x80;
1500
1501 hue_coord = (hsv_red_x[hue_index] * sd->saturation) >> 8;
1502 cmatrix[6] = (unsigned char)(hue_coord & 0xff);
1503 cmatrix[7] = (unsigned char)((hue_coord >> 8) & 0x0f);
1504
1505 hue_coord = (hsv_red_y[hue_index] * sd->saturation) >> 8;
1506 cmatrix[8] = (unsigned char)(hue_coord & 0xff);
1507 cmatrix[9] = (unsigned char)((hue_coord >> 8) & 0x0f);
1508
1509 hue_coord = (hsv_green_x[hue_index] * sd->saturation) >> 8;
1510 cmatrix[10] = (unsigned char)(hue_coord & 0xff);
1511 cmatrix[11] = (unsigned char)((hue_coord >> 8) & 0x0f);
1512
1513 hue_coord = (hsv_green_y[hue_index] * sd->saturation) >> 8;
1514 cmatrix[12] = (unsigned char)(hue_coord & 0xff);
1515 cmatrix[13] = (unsigned char)((hue_coord >> 8) & 0x0f);
1516
1517 hue_coord = (hsv_blue_x[hue_index] * sd->saturation) >> 8;
1518 cmatrix[14] = (unsigned char)(hue_coord & 0xff);
1519 cmatrix[15] = (unsigned char)((hue_coord >> 8) & 0x0f);
1520
1521 hue_coord = (hsv_blue_y[hue_index] * sd->saturation) >> 8;
1522 cmatrix[16] = (unsigned char)(hue_coord & 0xff);
1523 cmatrix[17] = (unsigned char)((hue_coord >> 8) & 0x0f);
1524
1525 return reg_w(gspca_dev, 0x10e1, cmatrix, 21);
1526}
1527
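/*
 * Load the 17-point gamma curve at bridge register 0x1190.  Each point is
 * a linear blend between a low and a high reference curve, with sd->gamma
 * (scaled to gval, 0..0xb8) selecting the blend position - a summary of
 * the interpolation written out below.
 */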
1528static int set_gamma(struct gspca_dev *gspca_dev)
1529{
1530 struct sd *sd = (struct sd *) gspca_dev;
1531 u8 gamma[17];
1532 u8 gval = sd->gamma * 0xb8 / 0x100;
 1533
1535 gamma[0] = 0x0a;
1536 gamma[1] = 0x13 + (gval * (0xcb - 0x13) / 0xb8);
1537 gamma[2] = 0x25 + (gval * (0xee - 0x25) / 0xb8);
1538 gamma[3] = 0x37 + (gval * (0xfa - 0x37) / 0xb8);
1539 gamma[4] = 0x45 + (gval * (0xfc - 0x45) / 0xb8);
1540 gamma[5] = 0x55 + (gval * (0xfb - 0x55) / 0xb8);
1541 gamma[6] = 0x65 + (gval * (0xfc - 0x65) / 0xb8);
1542 gamma[7] = 0x74 + (gval * (0xfd - 0x74) / 0xb8);
1543 gamma[8] = 0x83 + (gval * (0xfe - 0x83) / 0xb8);
1544 gamma[9] = 0x92 + (gval * (0xfc - 0x92) / 0xb8);
1545 gamma[10] = 0xa1 + (gval * (0xfc - 0xa1) / 0xb8);
1546 gamma[11] = 0xb0 + (gval * (0xfc - 0xb0) / 0xb8);
1547 gamma[12] = 0xbf + (gval * (0xfb - 0xbf) / 0xb8);
1548 gamma[13] = 0xce + (gval * (0xfb - 0xce) / 0xb8);
1549 gamma[14] = 0xdf + (gval * (0xfd - 0xdf) / 0xb8);
1550 gamma[15] = 0xea + (gval * (0xf9 - 0xea) / 0xb8);
1551 gamma[16] = 0xf5;
1552
1553 return reg_w(gspca_dev, 0x1190, gamma, 17);
1554}
1555
1556static int set_redblue(struct gspca_dev *gspca_dev)
1557{
1558 struct sd *sd = (struct sd *) gspca_dev;
1559 reg_w1(gspca_dev, 0x118c, sd->red);
1560 reg_w1(gspca_dev, 0x118f, sd->blue);
1561 return 0;
1562}
1563
1564static int set_hvflip(struct gspca_dev *gspca_dev)
1565{
1566 u8 value, tslb;
1567 u16 value2;
1568 struct sd *sd = (struct sd *) gspca_dev;
1569 switch (sd->sensor) {
1570 case SENSOR_OV9650:
1571 i2c_r1(gspca_dev, 0x1e, &value);
1572 value &= ~0x30;
1573 tslb = 0x01;
1574 if (sd->hflip)
1575 value |= 0x20;
1576 if (sd->vflip) {
1577 value |= 0x10;
1578 tslb = 0x49;
1579 }
1580 i2c_w1(gspca_dev, 0x1e, value);
1581 i2c_w1(gspca_dev, 0x3a, tslb);
1582 break;
1583 case SENSOR_MT9V111:
1584 case SENSOR_MT9V011:
1585 i2c_r2(gspca_dev, 0x20, &value2);
1586 value2 &= ~0xc0a0;
1587 if (sd->hflip)
1588 value2 |= 0x8080;
1589 if (sd->vflip)
1590 value2 |= 0x4020;
1591 i2c_w2(gspca_dev, 0x20, value2);
1592 break;
1593 case SENSOR_MT9M111:
1594 case SENSOR_MT9V112:
1595 i2c_r2(gspca_dev, 0x20, &value2);
1596 value2 &= ~0x0003;
1597 if (sd->hflip)
1598 value2 |= 0x0002;
1599 if (sd->vflip)
1600 value2 |= 0x0001;
1601 i2c_w2(gspca_dev, 0x20, value2);
1602 break;
1603 case SENSOR_HV7131R:
1604 i2c_r1(gspca_dev, 0x01, &value);
1605 value &= ~0x03;
1606 if (sd->vflip)
1607 value |= 0x01;
1608 if (sd->hflip)
1609 value |= 0x02;
1610 i2c_w1(gspca_dev, 0x01, value);
1611 break;
1612 }
1613 return 0;
1614}
1615
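/*
 * set_exposure() and set_gain() below hand-build the same 8-byte bridge
 * I2C command row that i2c_w() sends, because the register number, width
 * and byte order of the exposure/gain registers differ per sensor.
 */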
1616static int set_exposure(struct gspca_dev *gspca_dev)
1617{
1618 struct sd *sd = (struct sd *) gspca_dev;
1619 u8 exp[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e};
1620 switch (sd->sensor) {
1621 case SENSOR_OV7660:
1622 case SENSOR_OV7670:
1623 case SENSOR_SOI968:
1624 case SENSOR_OV9655:
1625 case SENSOR_OV9650:
1626 exp[0] |= (3 << 4);
1627 exp[2] = 0x2d;
1628 exp[3] = sd->exposure & 0xff;
1629 exp[4] = sd->exposure >> 8;
1630 break;
1631 case SENSOR_MT9M001:
1632 case SENSOR_MT9M111:
1633 case SENSOR_MT9V112:
1634 case SENSOR_MT9V111:
1635 case SENSOR_MT9V011:
1636 exp[0] |= (3 << 4);
1637 exp[2] = 0x09;
1638 exp[3] = sd->exposure >> 8;
1639 exp[4] = sd->exposure & 0xff;
1640 break;
1641 case SENSOR_HV7131R:
1642 exp[0] |= (4 << 4);
1643 exp[2] = 0x25;
1644 exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16;
1645 exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8;
1646 exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff;
1647 break;
1648 }
1649 i2c_w(gspca_dev, exp);
1650 return 0;
1651}
1652
1653static int set_gain(struct gspca_dev *gspca_dev)
1654{
1655 struct sd *sd = (struct sd *) gspca_dev;
1656 u8 gain[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d};
1657 switch (sd->sensor) {
1658 case SENSOR_OV7660:
1659 case SENSOR_OV7670:
1660 case SENSOR_SOI968:
1661 case SENSOR_OV9655:
1662 case SENSOR_OV9650:
1663 gain[0] |= (2 << 4);
1664 gain[3] = ov_gain[sd->gain];
1665 break;
1666 case SENSOR_MT9V011:
1667 case SENSOR_MT9V111:
1668 gain[0] |= (3 << 4);
1669 gain[2] = 0x35;
1670 gain[3] = micron1_gain[sd->gain] >> 8;
1671 gain[4] = micron1_gain[sd->gain] & 0xff;
1672 break;
1673 case SENSOR_MT9V112:
1674 case SENSOR_MT9M111:
1675 gain[0] |= (3 << 4);
1676 gain[2] = 0x2f;
1677 gain[3] = micron1_gain[sd->gain] >> 8;
1678 gain[4] = micron1_gain[sd->gain] & 0xff;
1679 break;
1680 case SENSOR_MT9M001:
1681 gain[0] |= (3 << 4);
1682 gain[2] = 0x2f;
1683 gain[3] = micron2_gain[sd->gain] >> 8;
1684 gain[4] = micron2_gain[sd->gain] & 0xff;
1685 break;
1686 case SENSOR_HV7131R:
1687 gain[0] |= (2 << 4);
1688 gain[2] = 0x30;
1689 gain[3] = hv7131r_gain[sd->gain];
1690 break;
1691 }
1692 i2c_w(gspca_dev, gain);
1693 return 0;
1694}
1695
1696static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val)
1697{
1698 struct sd *sd = (struct sd *) gspca_dev;
1699
1700 sd->brightness = val;
1701 if (gspca_dev->streaming)
1702 return set_cmatrix(gspca_dev);
1703 return 0;
1704}
1705
1706static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val)
1707{
1708 struct sd *sd = (struct sd *) gspca_dev;
1709 *val = sd->brightness;
1710 return 0;
1711}
1712
1713
1714static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val)
1715{
1716 struct sd *sd = (struct sd *) gspca_dev;
1717
1718 sd->contrast = val;
1719 if (gspca_dev->streaming)
1720 return set_cmatrix(gspca_dev);
1721 return 0;
1722}
1723
1724static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val)
1725{
1726 struct sd *sd = (struct sd *) gspca_dev;
1727 *val = sd->contrast;
1728 return 0;
1729}
1730
1731static int sd_setsaturation(struct gspca_dev *gspca_dev, s32 val)
1732{
1733 struct sd *sd = (struct sd *) gspca_dev;
1734
1735 sd->saturation = val;
1736 if (gspca_dev->streaming)
1737 return set_cmatrix(gspca_dev);
1738 return 0;
1739}
1740
1741static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val)
1742{
1743 struct sd *sd = (struct sd *) gspca_dev;
1744 *val = sd->saturation;
1745 return 0;
1746}
1747
1748static int sd_sethue(struct gspca_dev *gspca_dev, s32 val)
1749{
1750 struct sd *sd = (struct sd *) gspca_dev;
1751
1752 sd->hue = val;
1753 if (gspca_dev->streaming)
1754 return set_cmatrix(gspca_dev);
1755 return 0;
1756}
1757
1758static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val)
1759{
1760 struct sd *sd = (struct sd *) gspca_dev;
1761 *val = sd->hue;
1762 return 0;
1763}
1764
1765static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val)
1766{
1767 struct sd *sd = (struct sd *) gspca_dev;
1768
1769 sd->gamma = val;
1770 if (gspca_dev->streaming)
1771 return set_gamma(gspca_dev);
1772 return 0;
1773}
1774
1775static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val)
1776{
1777 struct sd *sd = (struct sd *) gspca_dev;
1778 *val = sd->gamma;
1779 return 0;
1780}
1781
1782static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val)
1783{
1784 struct sd *sd = (struct sd *) gspca_dev;
1785
1786 sd->red = val;
1787 if (gspca_dev->streaming)
1788 return set_redblue(gspca_dev);
1789 return 0;
1790}
1791
1792static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val)
1793{
1794 struct sd *sd = (struct sd *) gspca_dev;
1795 *val = sd->red;
1796 return 0;
1797}
1798
1799static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val)
1800{
1801 struct sd *sd = (struct sd *) gspca_dev;
1802
1803 sd->blue = val;
1804 if (gspca_dev->streaming)
1805 return set_redblue(gspca_dev);
1806 return 0;
1807}
1808
1809static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val)
1810{
1811 struct sd *sd = (struct sd *) gspca_dev;
1812 *val = sd->blue;
1813 return 0;
1814}
1815
1816static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val)
1817{
1818 struct sd *sd = (struct sd *) gspca_dev;
1819
1820 sd->hflip = val;
1821 if (gspca_dev->streaming)
1822 return set_hvflip(gspca_dev);
1823 return 0;
1824}
1825
1826static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val)
1827{
1828 struct sd *sd = (struct sd *) gspca_dev;
1829 *val = sd->hflip;
1830 return 0;
1831}
1832
1833static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val)
1834{
1835 struct sd *sd = (struct sd *) gspca_dev;
1836
1837 sd->vflip = val;
1838 if (gspca_dev->streaming)
1839 return set_hvflip(gspca_dev);
1840 return 0;
1841}
1842
1843static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val)
1844{
1845 struct sd *sd = (struct sd *) gspca_dev;
1846 *val = sd->vflip;
1847 return 0;
1848}
1849
1850static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val)
1851{
1852 struct sd *sd = (struct sd *) gspca_dev;
1853
1854 sd->exposure = val;
1855 if (gspca_dev->streaming)
1856 return set_exposure(gspca_dev);
1857 return 0;
1858}
1859
1860static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val)
1861{
1862 struct sd *sd = (struct sd *) gspca_dev;
1863 *val = sd->exposure;
1864 return 0;
1865}
1866
1867static int sd_setgain(struct gspca_dev *gspca_dev, s32 val)
1868{
1869 struct sd *sd = (struct sd *) gspca_dev;
1870
1871 sd->gain = val;
1872 if (gspca_dev->streaming)
1873 return set_gain(gspca_dev);
1874 return 0;
1875}
1876
1877static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val)
1878{
1879 struct sd *sd = (struct sd *) gspca_dev;
1880 *val = sd->gain;
1881 return 0;
1882}
1883
1884static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val)
1885{
1886 struct sd *sd = (struct sd *) gspca_dev;
1887 sd->auto_exposure = val;
1888 return 0;
1889}
1890
1891static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val)
1892{
1893 struct sd *sd = (struct sd *) gspca_dev;
1894 *val = sd->auto_exposure;
1895 return 0;
1896}
1897
1898#ifdef CONFIG_VIDEO_ADV_DEBUG
1899static int sd_dbg_g_register(struct gspca_dev *gspca_dev,
1900 struct v4l2_dbg_register *reg)
1901{
1902 struct sd *sd = (struct sd *) gspca_dev;
1903 switch (reg->match.type) {
1904 case V4L2_CHIP_MATCH_HOST:
1905 if (reg->match.addr != 0)
1906 return -EINVAL;
1907 if (reg->reg < 0x1000 || reg->reg > 0x11ff)
1908 return -EINVAL;
1909 if (reg_r(gspca_dev, reg->reg, 1) < 0)
1910 return -EINVAL;
1911 reg->val = gspca_dev->usb_buf[0];
1912 return 0;
1913 case V4L2_CHIP_MATCH_I2C_ADDR:
1914 if (reg->match.addr != sd->i2c_addr)
1915 return -EINVAL;
1916 if (sd->sensor >= SENSOR_MT9V011 &&
1917 sd->sensor <= SENSOR_MT9M111) {
1918 if (i2c_r2(gspca_dev, reg->reg, (u16 *)&reg->val) < 0)
1919 return -EINVAL;
1920 } else {
1921 if (i2c_r1(gspca_dev, reg->reg, (u8 *)&reg->val) < 0)
1922 return -EINVAL;
1923 }
1924 return 0;
1925 }
1926 return -EINVAL;
1927}
1928
1929static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
1930 struct v4l2_dbg_register *reg)
1931{
1932 struct sd *sd = (struct sd *) gspca_dev;
1933 switch (reg->match.type) {
1934 case V4L2_CHIP_MATCH_HOST:
1935 if (reg->match.addr != 0)
1936 return -EINVAL;
1937 if (reg->reg < 0x1000 || reg->reg > 0x11ff)
1938 return -EINVAL;
1939 if (reg_w1(gspca_dev, reg->reg, reg->val) < 0)
1940 return -EINVAL;
1941 return 0;
1942 case V4L2_CHIP_MATCH_I2C_ADDR:
1943 if (reg->match.addr != sd->i2c_addr)
1944 return -EINVAL;
1945 if (sd->sensor >= SENSOR_MT9V011 &&
1946 sd->sensor <= SENSOR_MT9M111) {
1947 if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0)
1948 return -EINVAL;
1949 } else {
1950 if (i2c_w1(gspca_dev, reg->reg, reg->val) < 0)
1951 return -EINVAL;
1952 }
1953 return 0;
1954 }
1955 return -EINVAL;
1956}
1957#endif
1958
1959static int sd_chip_ident(struct gspca_dev *gspca_dev,
1960 struct v4l2_dbg_chip_ident *chip)
1961{
1962 struct sd *sd = (struct sd *) gspca_dev;
1963
1964 switch (chip->match.type) {
1965 case V4L2_CHIP_MATCH_HOST:
1966 if (chip->match.addr != 0)
1967 return -EINVAL;
1968 chip->revision = 0;
1969 chip->ident = V4L2_IDENT_SN9C20X;
1970 return 0;
1971 case V4L2_CHIP_MATCH_I2C_ADDR:
1972 if (chip->match.addr != sd->i2c_addr)
1973 return -EINVAL;
1974 chip->revision = 0;
1975 chip->ident = i2c_ident[sd->sensor];
1976 return 0;
1977 }
1978 return -EINVAL;
1979}
1980
1981static int sd_config(struct gspca_dev *gspca_dev,
1982 const struct usb_device_id *id)
1983{
1984 struct sd *sd = (struct sd *) gspca_dev;
1985 struct cam *cam;
1986
1987 cam = &gspca_dev->cam;
1988
1989 sd->sensor = (id->driver_info >> 8) & 0xff;
1990 sd->i2c_addr = id->driver_info & 0xff;
1991
1992 switch (sd->sensor) {
1993 case SENSOR_OV9650:
1994 cam->cam_mode = sxga_mode;
1995 cam->nmodes = ARRAY_SIZE(sxga_mode);
1996 break;
1997 default:
1998 cam->cam_mode = vga_mode;
1999 cam->nmodes = ARRAY_SIZE(vga_mode);
2000 }
2001
2002 sd->old_step = 0;
2003 sd->older_step = 0;
2004 sd->exposure_step = 16;
2005
2006 sd->brightness = BRIGHTNESS_DEFAULT;
2007 sd->contrast = CONTRAST_DEFAULT;
2008 sd->saturation = SATURATION_DEFAULT;
2009 sd->hue = HUE_DEFAULT;
2010 sd->gamma = GAMMA_DEFAULT;
2011 sd->red = RED_DEFAULT;
2012 sd->blue = BLUE_DEFAULT;
2013
2014 sd->hflip = HFLIP_DEFAULT;
2015 sd->vflip = VFLIP_DEFAULT;
2016 sd->exposure = EXPOSURE_DEFAULT;
2017 sd->gain = GAIN_DEFAULT;
2018 sd->auto_exposure = AUTO_EXPOSURE_DEFAULT;
2019
2020 sd->quality = 95;
2021
2022#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
2023 sd->input_gpio = (id->driver_info >> 16) & 0xff;
2024 if (sn9c20x_input_init(gspca_dev) < 0)
2025 return -ENODEV;
2026#endif
2027 return 0;
2028}
2029
2030static int sd_init(struct gspca_dev *gspca_dev)
2031{
2032 struct sd *sd = (struct sd *) gspca_dev;
2033 int i;
2034 u8 value;
2035 u8 i2c_init[9] =
2036 {0x80, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03};
2037
2038 for (i = 0; i < ARRAY_SIZE(bridge_init); i++) {
2039 value = bridge_init[i][1];
2040 if (reg_w(gspca_dev, bridge_init[i][0], &value, 1) < 0) {
2041 err("Device initialization failed");
2042 return -ENODEV;
2043 }
2044 }
2045
2046 if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) {
2047 err("Device initialization failed");
2048 return -ENODEV;
2049 }
2050
2051 switch (sd->sensor) {
2052 case SENSOR_OV9650:
2053 if (ov9650_init_sensor(gspca_dev) < 0)
2054 return -ENODEV;
2055 info("OV9650 sensor detected");
2056 break;
2057 case SENSOR_OV9655:
2058 if (ov9655_init_sensor(gspca_dev) < 0)
2059 return -ENODEV;
2060 info("OV9655 sensor detected");
2061 break;
2062 case SENSOR_SOI968:
2063 if (soi968_init_sensor(gspca_dev) < 0)
2064 return -ENODEV;
2065 info("SOI968 sensor detected");
2066 break;
2067 case SENSOR_OV7660:
2068 if (ov7660_init_sensor(gspca_dev) < 0)
2069 return -ENODEV;
2070 info("OV7660 sensor detected");
2071 break;
2072 case SENSOR_OV7670:
2073 if (ov7670_init_sensor(gspca_dev) < 0)
2074 return -ENODEV;
2075 info("OV7670 sensor detected");
2076 break;
2077 case SENSOR_MT9VPRB:
2078 if (mt9v_init_sensor(gspca_dev) < 0)
2079 return -ENODEV;
2080 break;
2081 case SENSOR_MT9M111:
2082 if (mt9m111_init_sensor(gspca_dev) < 0)
2083 return -ENODEV;
2084 info("MT9M111 sensor detected");
2085 break;
2086 case SENSOR_MT9M001:
2087 if (mt9m001_init_sensor(gspca_dev) < 0)
2088 return -ENODEV;
2089 info("MT9M001 sensor detected");
2090 break;
2091 case SENSOR_HV7131R:
2092 if (hv7131r_init_sensor(gspca_dev) < 0)
2093 return -ENODEV;
2094 info("HV7131R sensor detected");
2095 break;
2096 default:
 2097 info("Unsupported sensor");
2098 return -ENODEV;
2099 }
2100
2101 return 0;
2102}
2103
2104static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode)
2105{
2106 struct sd *sd = (struct sd *) gspca_dev;
2107 u8 value;
2108 switch (sd->sensor) {
2109 case SENSOR_OV9650:
2110 if (mode & MODE_SXGA) {
2111 i2c_w1(gspca_dev, 0x17, 0x1b);
2112 i2c_w1(gspca_dev, 0x18, 0xbc);
2113 i2c_w1(gspca_dev, 0x19, 0x01);
2114 i2c_w1(gspca_dev, 0x1a, 0x82);
2115 i2c_r1(gspca_dev, 0x12, &value);
2116 i2c_w1(gspca_dev, 0x12, value & 0x07);
2117 } else {
2118 i2c_w1(gspca_dev, 0x17, 0x24);
2119 i2c_w1(gspca_dev, 0x18, 0xc5);
2120 i2c_w1(gspca_dev, 0x19, 0x00);
2121 i2c_w1(gspca_dev, 0x1a, 0x3c);
2122 i2c_r1(gspca_dev, 0x12, &value);
2123 i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40);
2124 }
2125 break;
2126 }
2127}
2128
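/*
 * Helper macros for sd_start(): HW_WIN() builds the 6-byte capture window
 * descriptor written to 0x1180 (hstart and vstart little-endian, then
 * width/16 and height/8 for the selected mode), and CLR_WIN() builds the
 * 5-byte window written to 0x10fb (width/4 and height/2 plus their
 * overflow bits).
 */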
2129#define HW_WIN(mode, hstart, vstart) \
2130((const u8 []){hstart & 0xff, hstart >> 8, \
2131vstart & 0xff, vstart >> 8, \
2132(mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \
2133(mode & MODE_SXGA ? 1024 >> 3 : 480 >> 3)})
2134
2135#define CLR_WIN(width, height) \
2136((const u8 [])\
2137{0, width >> 2, 0, height >> 1,\
2138((width >> 10) & 0x01) | ((height >> 8) & 0x6)})
2139
2140static int sd_start(struct gspca_dev *gspca_dev)
2141{
2142 struct sd *sd = (struct sd *) gspca_dev;
2143 int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
2144 int width = gspca_dev->width;
2145 int height = gspca_dev->height;
2146 u8 fmt, scale = 0;
2147
2148 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
2149 if (sd->jpeg_hdr == NULL)
2150 return -ENOMEM;
2151
2152 jpeg_define(sd->jpeg_hdr, height, width,
2153 0x21);
2154 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
2155
2156 if (mode & MODE_RAW)
2157 fmt = 0x2d;
2158 else if (mode & MODE_JPEG)
2159 fmt = 0x2c;
2160 else
2161 fmt = 0x2f;
2162
2163 switch (mode & 0x0f) {
2164 case 3:
2165 scale = 0xc0;
2166 info("Set 1280x1024");
2167 break;
2168 case 2:
2169 scale = 0x80;
2170 info("Set 640x480");
2171 break;
2172 case 1:
2173 scale = 0x90;
2174 info("Set 320x240");
2175 break;
2176 case 0:
2177 scale = 0xa0;
2178 info("Set 160x120");
2179 break;
2180 }
2181
2182 configure_sensor_output(gspca_dev, mode);
2183 reg_w(gspca_dev, 0x1100, sd->jpeg_hdr + JPEG_QT0_OFFSET, 64);
2184 reg_w(gspca_dev, 0x1140, sd->jpeg_hdr + JPEG_QT1_OFFSET, 64);
2185 reg_w(gspca_dev, 0x10fb, CLR_WIN(width, height), 5);
2186 reg_w(gspca_dev, 0x1180, HW_WIN(mode, sd->hstart, sd->vstart), 6);
2187 reg_w1(gspca_dev, 0x1189, scale);
2188 reg_w1(gspca_dev, 0x10e0, fmt);
2189
2190 set_cmatrix(gspca_dev);
2191 set_gamma(gspca_dev);
2192 set_redblue(gspca_dev);
2193 set_gain(gspca_dev);
2194 set_exposure(gspca_dev);
2195 set_hvflip(gspca_dev);
2196
2197 reg_r(gspca_dev, 0x1061, 1);
2198 reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02);
2199 return 0;
2200}
2201
2202static void sd_stopN(struct gspca_dev *gspca_dev)
2203{
2204 reg_r(gspca_dev, 0x1061, 1);
2205 reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02);
2206}
2207
2208static void sd_stop0(struct gspca_dev *gspca_dev)
2209{
2210 struct sd *sd = (struct sd *) gspca_dev;
2211 kfree(sd->jpeg_hdr);
2212}
2213
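/*
 * Software auto-exposure: nudge sd->exposure towards the MIN/MAX_AVG_LUM
 * window with an adaptive step - the step is halved whenever the last two
 * adjustments went in opposite directions, and grown by 2 when the same
 * direction repeats.  The exposure limits (0x10..0x1770) are hardcoded
 * below.
 */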
2214static void do_autoexposure(struct gspca_dev *gspca_dev)
2215{
2216 struct sd *sd = (struct sd *) gspca_dev;
2217 int avg_lum, new_exp;
2218
2219 if (!sd->auto_exposure)
2220 return;
2221
2222 avg_lum = atomic_read(&sd->avg_lum);
2223
2224 /*
2225 * some hardcoded values are present
2226 * like those for maximal/minimal exposure
2227 * and exposure steps
2228 */
2229 if (avg_lum < MIN_AVG_LUM) {
2230 if (sd->exposure > 0x1770)
2231 return;
2232
2233 new_exp = sd->exposure + sd->exposure_step;
2234 if (new_exp > 0x1770)
2235 new_exp = 0x1770;
2236 if (new_exp < 0x10)
2237 new_exp = 0x10;
2238 sd->exposure = new_exp;
2239 set_exposure(gspca_dev);
2240
2241 sd->older_step = sd->old_step;
2242 sd->old_step = 1;
2243
2244 if (sd->old_step ^ sd->older_step)
2245 sd->exposure_step /= 2;
2246 else
2247 sd->exposure_step += 2;
2248 }
2249 if (avg_lum > MAX_AVG_LUM) {
2250 if (sd->exposure < 0x10)
2251 return;
2252 new_exp = sd->exposure - sd->exposure_step;
 2253 if (new_exp > 0x1770)
 2254 new_exp = 0x1770;
2255 if (new_exp < 0x10)
2256 new_exp = 0x10;
2257 sd->exposure = new_exp;
2258 set_exposure(gspca_dev);
2259 sd->older_step = sd->old_step;
2260 sd->old_step = 0;
2261
2262 if (sd->old_step ^ sd->older_step)
2263 sd->exposure_step /= 2;
2264 else
2265 sd->exposure_step += 2;
2266 }
2267}
2268
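/*
 * Isochronous data parser.  A 64-byte packet starting with frame_header
 * marks the end of a frame and carries luminance statistics: eight
 * windowed luminance values are unpacked, summed and scaled (>> 9) into
 * sd->avg_lum for the auto-exposure loop.  In JPEG modes the prepared
 * header (sd->jpeg_hdr) is prepended to each new frame.
 */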
2269static void sd_pkt_scan(struct gspca_dev *gspca_dev,
2270 struct gspca_frame *frame, /* target */
2271 u8 *data, /* isoc packet */
2272 int len) /* iso packet length */
2273{
2274 struct sd *sd = (struct sd *) gspca_dev;
2275 int avg_lum;
2276 static unsigned char frame_header[] =
2277 {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96};
2278 if (len == 64 && memcmp(data, frame_header, 6) == 0) {
2279 avg_lum = ((data[35] >> 2) & 3) |
2280 (data[20] << 2) |
2281 (data[19] << 10);
2282 avg_lum += ((data[35] >> 4) & 3) |
2283 (data[22] << 2) |
2284 (data[21] << 10);
2285 avg_lum += ((data[35] >> 6) & 3) |
2286 (data[24] << 2) |
2287 (data[23] << 10);
2288 avg_lum += (data[36] & 3) |
2289 (data[26] << 2) |
2290 (data[25] << 10);
2291 avg_lum += ((data[36] >> 2) & 3) |
2292 (data[28] << 2) |
2293 (data[27] << 10);
2294 avg_lum += ((data[36] >> 4) & 3) |
2295 (data[30] << 2) |
2296 (data[29] << 10);
2297 avg_lum += ((data[36] >> 6) & 3) |
2298 (data[32] << 2) |
2299 (data[31] << 10);
2300 avg_lum += ((data[44] >> 4) & 3) |
2301 (data[34] << 2) |
2302 (data[33] << 10);
2303 avg_lum >>= 9;
2304 atomic_set(&sd->avg_lum, avg_lum);
2305 gspca_frame_add(gspca_dev, LAST_PACKET,
2306 frame, data, len);
2307 return;
2308 }
2309 if (gspca_dev->last_packet_type == LAST_PACKET) {
2310 if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv
2311 & MODE_JPEG) {
2312 gspca_frame_add(gspca_dev, FIRST_PACKET, frame,
2313 sd->jpeg_hdr, JPEG_HDR_SZ);
2314 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
2315 data, len);
2316 } else {
2317 gspca_frame_add(gspca_dev, FIRST_PACKET, frame,
2318 data, len);
2319 }
2320 } else {
2321 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
2322 }
2323}
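
Each of the eight terms summed into avg_lum above follows the same bit layout: two low bits taken from a packed status byte, one byte shifted left by 2 and one shifted left by 10. A hypothetical helper (not part of the driver) that extracts one such window value:

	static int window_lum(const unsigned char *data,
			      int low_byte, int low_shift, int pair)
	{
		return ((data[low_byte] >> low_shift) & 3) |
		       (data[pair + 1] << 2) |
		       (data[pair] << 10);
	}

With that helper the first window above would be window_lum(data, 35, 2, 19) and the last one window_lum(data, 44, 4, 33); the sum of all eight is then scaled down with >> 9 before being stored as avg_lum.
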
2324
2325/* sub-driver description */
2326static const struct sd_desc sd_desc = {
2327 .name = MODULE_NAME,
2328 .ctrls = sd_ctrls,
2329 .nctrls = ARRAY_SIZE(sd_ctrls),
2330 .config = sd_config,
2331 .init = sd_init,
2332 .start = sd_start,
2333 .stopN = sd_stopN,
2334 .stop0 = sd_stop0,
2335 .pkt_scan = sd_pkt_scan,
2336 .dq_callback = do_autoexposure,
2337#ifdef CONFIG_VIDEO_ADV_DEBUG
2338 .set_register = sd_dbg_s_register,
2339 .get_register = sd_dbg_g_register,
2340#endif
2341 .get_chip_ident = sd_chip_ident,
2342};
2343
2344#define SN9C20X(sensor, i2c_addr, button_mask) \
2345 .driver_info = (button_mask << 16) \
2346 | (SENSOR_ ## sensor << 8) \
2347 | (i2c_addr)
2348
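
The driver_info word built by SN9C20X() packs the button mask into bits 16 and up, the sensor id into bits 15..8 and the sensor's i2c address into bits 7..0. The matching accessors live in sd_config(), which is outside this excerpt; an equivalent unpacking, shown here only as an assumed sketch, would be:

	/* Assumed layout, mirroring the macro above; not the driver's own code. */
	#define SN9C20X_I2C_ADDR(info)		((info) & 0xff)
	#define SN9C20X_SENSOR(info)		(((info) >> 8) & 0xff)
	#define SN9C20X_BUTTON_MASK(info)	((info) >> 16)
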
2349static const __devinitdata struct usb_device_id device_table[] = {
2350 {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
2351 {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
2352 {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
2353 {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)},
2354 {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)},
2355 {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)},
2356 {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)},
2357 {USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)},
2358 {USB_DEVICE(0x0c45, 0x6270), SN9C20X(MT9VPRB, 0x00, 0)},
2359 {USB_DEVICE(0x0c45, 0x627b), SN9C20X(OV7660, 0x21, 0)},
2360 {USB_DEVICE(0x0c45, 0x627c), SN9C20X(HV7131R, 0x11, 0)},
2361 {USB_DEVICE(0x0c45, 0x627f), SN9C20X(OV9650, 0x30, 0)},
2362 {USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)},
2363 {USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)},
2364 {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)},
2365 {USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)},
2366 {USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)},
2367 {USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)},
2368 {USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)},
2369 {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, 0)},
2370 {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, 0)},
2371 {USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)},
2372 {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
2373 {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
2374 {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
2375 {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
2376 {USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)},
2377 {USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)},
2378 {USB_DEVICE(0xa168, 0x0618), SN9C20X(HV7131R, 0x11, 0)},
2379 {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, 0)},
2380 {USB_DEVICE(0xa168, 0x0615), SN9C20X(MT9M111, 0x5d, 0)},
2381 {USB_DEVICE(0xa168, 0x0617), SN9C20X(MT9M111, 0x5d, 0)},
2382 {}
2383};
2384MODULE_DEVICE_TABLE(usb, device_table);
2385
2386/* -- device connect -- */
2387static int sd_probe(struct usb_interface *intf,
2388 const struct usb_device_id *id)
2389{
2390 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
2391 THIS_MODULE);
2392}
2393
2394static void sd_disconnect(struct usb_interface *intf)
2395{
2396#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
2397 struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
2398
2399 sn9c20x_input_cleanup(gspca_dev);
2400#endif
2401
2402 gspca_disconnect(intf);
2403}
2404
2405static struct usb_driver sd_driver = {
2406 .name = MODULE_NAME,
2407 .id_table = device_table,
2408 .probe = sd_probe,
2409 .disconnect = sd_disconnect,
2410#ifdef CONFIG_PM
2411 .suspend = gspca_suspend,
2412 .resume = gspca_resume,
2413 .reset_resume = gspca_resume,
2414#endif
2415};
2416
2417/* -- module insert / remove -- */
2418static int __init sd_mod_init(void)
2419{
2420 int ret;
2421 ret = usb_register(&sd_driver);
2422 if (ret < 0)
2423 return ret;
2424 info("registered");
2425 return 0;
2426}
2427static void __exit sd_mod_exit(void)
2428{
2429 usb_deregister(&sd_driver);
2430 info("deregistered");
2431}
2432
2433module_init(sd_mod_init);
2434module_exit(sd_mod_exit);
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 0d02f41fa7d0..d6332ab80669 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -1634,6 +1634,8 @@ static void setfreq(struct gspca_dev *gspca_dev)
1634{ 1634{
1635 struct sd *sd = (struct sd *) gspca_dev; 1635 struct sd *sd = (struct sd *) gspca_dev;
1636 1636
1637 if (gspca_dev->ctrl_dis & (1 << FREQ_IDX))
1638 return;
1637 if (sd->sensor == SENSOR_OV7660) { 1639 if (sd->sensor == SENSOR_OV7660) {
1638 switch (sd->freq) { 1640 switch (sd->freq) {
1639 case 0: /* Banding filter disabled */ 1641 case 0: /* Banding filter disabled */
@@ -1735,6 +1737,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
1735 1737
1736 /* create the JPEG header */ 1738 /* create the JPEG header */
1737 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); 1739 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
1740 if (!sd->jpeg_hdr)
1741 return -ENOMEM;
1738 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 1742 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
1739 0x21); /* JPEG 422 */ 1743 0x21); /* JPEG 422 */
1740 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 1744 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 8806b2ff82be..fab7ef85a6c1 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -670,6 +670,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
670 670
671 /* create the JPEG header */ 671 /* create the JPEG header */
672 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); 672 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
673 if (!sd->jpeg_hdr)
674 return -ENOMEM;
673 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 675 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
674 0x22); /* JPEG 411 */ 676 0x22); /* JPEG 411 */
675 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 677 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index f25be20cf1a6..47628964801e 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -333,6 +333,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
333 333
334 /* create the JPEG header */ 334 /* create the JPEG header */
335 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); 335 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
336 if (!sd->jpeg_hdr)
337 return -ENOMEM;
336 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 338 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
337 0x22); /* JPEG 411 */ 339 0x22); /* JPEG 411 */
338 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 340 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 9df7137fe67e..992ce530f138 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -36,10 +36,6 @@
36 36
37#define STV_ISOC_ENDPOINT_ADDR 0x81 37#define STV_ISOC_ENDPOINT_ADDR 0x81
38 38
39#ifndef V4L2_PIX_FMT_SGRBG8
40#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G')
41#endif
42
43#define STV_REG23 0x0423 39#define STV_REG23 0x0423
44 40
45/* Control registers of the STV0600 ASIC */ 41/* Control registers of the STV0600 ASIC */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index 3039ec208f3a..e5024c8496ef 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -64,7 +64,7 @@ static struct v4l2_pix_format hdcs1x00_mode[] = {
64 { 64 {
65 HDCS_1X00_DEF_WIDTH, 65 HDCS_1X00_DEF_WIDTH,
66 HDCS_1X00_DEF_HEIGHT, 66 HDCS_1X00_DEF_HEIGHT,
67 V4L2_PIX_FMT_SBGGR8, 67 V4L2_PIX_FMT_SGRBG8,
68 V4L2_FIELD_NONE, 68 V4L2_FIELD_NONE,
69 .sizeimage = 69 .sizeimage =
70 HDCS_1X00_DEF_WIDTH * HDCS_1X00_DEF_HEIGHT, 70 HDCS_1X00_DEF_WIDTH * HDCS_1X00_DEF_HEIGHT,
@@ -80,7 +80,7 @@ static struct v4l2_pix_format hdcs1020_mode[] = {
80 { 80 {
81 HDCS_1020_DEF_WIDTH, 81 HDCS_1020_DEF_WIDTH,
82 HDCS_1020_DEF_HEIGHT, 82 HDCS_1020_DEF_HEIGHT,
83 V4L2_PIX_FMT_SBGGR8, 83 V4L2_PIX_FMT_SGRBG8,
84 V4L2_FIELD_NONE, 84 V4L2_FIELD_NONE,
85 .sizeimage = 85 .sizeimage =
86 HDCS_1020_DEF_WIDTH * HDCS_1020_DEF_HEIGHT, 86 HDCS_1020_DEF_WIDTH * HDCS_1020_DEF_HEIGHT,
@@ -131,9 +131,11 @@ static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len)
131 (reg + len > 0xff))) 131 (reg + len > 0xff)))
132 return -EINVAL; 132 return -EINVAL;
133 133
134 for (i = 0; i < len; i++, reg++) { 134 for (i = 0; i < len; i++) {
135 regs[2*i] = reg; 135 regs[2 * i] = reg;
136 regs[2*i+1] = vals[i]; 136 regs[2 * i + 1] = vals[i];
137 /* All addresses are shifted left one bit as bit 0 toggles r/w */
138 reg += 2;
137 } 139 }
138 140
139 return stv06xx_write_sensor_bytes(sd, regs, len); 141 return stv06xx_write_sensor_bytes(sd, regs, len);
@@ -174,7 +176,9 @@ static int hdcs_set_state(struct sd *sd, enum hdcs_power_state state)
174 } 176 }
175 177
176 ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), val); 178 ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), val);
177 if (ret < 0) 179
180 /* Update the state if the write succeeded */
181 if (!ret)
178 hdcs->state = state; 182 hdcs->state = state;
179 183
180 return ret; 184 return ret;
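
The loop change in hdcs_reg_write_seq() above follows from the comment it adds: sensor register addresses are shifted left by one bit, with bit 0 selecting read/write, so a sequential write advances the (shifted) address by 2 per value. Assuming vals = {0xaa, 0xbb, 0xcc} and a starting shifted address of 0x10, the regs[] buffer handed to stv06xx_write_sensor_bytes() would contain:

	/* hypothetical example payload, interleaved as {address, value} pairs */
	unsigned char regs[] = { 0x10, 0xaa, 0x12, 0xbb, 0x14, 0xcc };
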
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 9623f294bdac..5127bbf9dd26 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -973,6 +973,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
973 973
974 /* create the JPEG header */ 974 /* create the JPEG header */
975 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); 975 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
976 if (!sd->jpeg_hdr)
977 return -ENOMEM;
976 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 978 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
977 0x22); /* JPEG 411 */ 979 0x22); /* JPEG 411 */
978 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 980 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 08422d315e68..3d2756f7874a 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -7243,6 +7243,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
7243 7243
7244 /* create the JPEG header */ 7244 /* create the JPEG header */
7245 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); 7245 sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
7246 if (!sd->jpeg_hdr)
7247 return -ENOMEM;
7246 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 7248 jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
7247 0x21); /* JPEG 422 */ 7249 0x21); /* JPEG 422 */
7248 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 7250 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
new file mode 100644
index 000000000000..b2260de645f0
--- /dev/null
+++ b/drivers/media/video/mt9v011.c
@@ -0,0 +1,496 @@
1/*
2 * mt9v011 - Micron 1/4-Inch VGA Digital Image Sensor
3 *
4 * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
5 * This code is placed under the terms of the GNU General Public License v2
6 */
7
8#include <linux/i2c.h>
9#include <linux/videodev2.h>
10#include <linux/delay.h>
11#include <asm/div64.h>
12#include <media/v4l2-device.h>
13#include "mt9v011.h"
14#include <media/v4l2-i2c-drv.h>
15#include <media/v4l2-chip-ident.h>
16
17MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
18MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
19MODULE_LICENSE("GPL");
20
21
22static int debug;
23module_param(debug, int, 0);
24MODULE_PARM_DESC(debug, "Debug level (0-2)");
25
26/* supported controls */
27static struct v4l2_queryctrl mt9v011_qctrl[] = {
28 {
29 .id = V4L2_CID_GAIN,
30 .type = V4L2_CTRL_TYPE_INTEGER,
31 .name = "Gain",
32 .minimum = 0,
33 .maximum = (1 << 10) - 1,
34 .step = 1,
35 .default_value = 0x0020,
36 .flags = 0,
37 }, {
38 .id = V4L2_CID_RED_BALANCE,
39 .type = V4L2_CTRL_TYPE_INTEGER,
40 .name = "Red Balance",
41 .minimum = -1 << 9,
42 .maximum = (1 << 9) - 1,
43 .step = 1,
44 .default_value = 0,
45 .flags = 0,
46 }, {
47 .id = V4L2_CID_BLUE_BALANCE,
48 .type = V4L2_CTRL_TYPE_INTEGER,
49 .name = "Blue Balance",
50 .minimum = -1 << 9,
51 .maximum = (1 << 9) - 1,
52 .step = 1,
53 .default_value = 0,
54 .flags = 0,
55 },
56};
57
58struct mt9v011 {
59 struct v4l2_subdev sd;
60 unsigned width, height;
61 unsigned xtal;
62
63 u16 global_gain, red_bal, blue_bal;
64};
65
66static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
67{
68 return container_of(sd, struct mt9v011, sd);
69}
70
71static int mt9v011_read(struct v4l2_subdev *sd, unsigned char addr)
72{
73 struct i2c_client *c = v4l2_get_subdevdata(sd);
74 __be16 buffer;
75 int rc, val;
76
77 rc = i2c_master_send(c, &addr, 1);
78 if (rc != 1)
79 v4l2_dbg(0, debug, sd,
80 "i2c i/o error: rc == %d (should be 1)\n", rc);
81
82 msleep(10);
83
84 rc = i2c_master_recv(c, (char *)&buffer, 2);
85 if (rc != 2)
86 v4l2_dbg(0, debug, sd,
87 "i2c i/o error: rc == %d (should be 2)\n", rc);
88
89 val = be16_to_cpu(buffer);
90
91 v4l2_dbg(2, debug, sd, "mt9v011: read 0x%02x = 0x%04x\n", addr, val);
92
93 return val;
94}
95
96static void mt9v011_write(struct v4l2_subdev *sd, unsigned char addr,
97 u16 value)
98{
99 struct i2c_client *c = v4l2_get_subdevdata(sd);
100 unsigned char buffer[3];
101 int rc;
102
103 buffer[0] = addr;
104 buffer[1] = value >> 8;
105 buffer[2] = value & 0xff;
106
107 v4l2_dbg(2, debug, sd,
108 "mt9v011: writing 0x%02x 0x%04x\n", buffer[0], value);
109 rc = i2c_master_send(c, buffer, 3);
110 if (rc != 3)
111 v4l2_dbg(0, debug, sd,
112 "i2c i/o error: rc == %d (should be 3)\n", rc);
113}
114
115
116struct i2c_reg_value {
117 unsigned char reg;
118 u16 value;
119};
120
121/*
122 * Values used by the original driver.
123 * Some values are marked as Reserved in the datasheet.
124 */
125static const struct i2c_reg_value mt9v011_init_default[] = {
126 { R0D_MT9V011_RESET, 0x0001 },
127 { R0D_MT9V011_RESET, 0x0000 },
128
129 { R0C_MT9V011_SHUTTER_DELAY, 0x0000 },
130 { R09_MT9V011_SHUTTER_WIDTH, 0x1fc },
131
132 { R0A_MT9V011_CLK_SPEED, 0x0000 },
133 { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 },
134 { R20_MT9V011_READ_MODE, 0x1000 },
135
136 { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */
137};
138
139static void set_balance(struct v4l2_subdev *sd)
140{
141 struct mt9v011 *core = to_mt9v011(sd);
142 u16 green1_gain, green2_gain, blue_gain, red_gain;
143
144 green1_gain = core->global_gain;
145 green2_gain = core->global_gain;
146
147 blue_gain = core->global_gain +
148 core->global_gain * core->blue_bal / (1 << 9);
149
150 red_gain = core->global_gain +
151		core->global_gain * core->red_bal / (1 << 9);
152
153 mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green1_gain);
154	mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green2_gain);
155 mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
156 mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
157}
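
The balance arithmetic above amounts to gain = global_gain * (1 + balance / 512). As a worked example with the probe-time default global_gain of 0x24 (36) and blue_bal = 256: blue_gain = 36 + 36 * 256 / 512 = 54, i.e. a 1.5x boost on the blue channel, while the two green gains stay at the plain global_gain.
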
158
159static void calc_fps(struct v4l2_subdev *sd)
160{
161 struct mt9v011 *core = to_mt9v011(sd);
162 unsigned height, width, hblank, vblank, speed;
163 unsigned row_time, t_time;
164 u64 frames_per_ms;
165 unsigned tmp;
166
167 height = mt9v011_read(sd, R03_MT9V011_HEIGHT);
168 width = mt9v011_read(sd, R04_MT9V011_WIDTH);
169 hblank = mt9v011_read(sd, R05_MT9V011_HBLANK);
170 vblank = mt9v011_read(sd, R06_MT9V011_VBLANK);
171 speed = mt9v011_read(sd, R0A_MT9V011_CLK_SPEED);
172
173 row_time = (width + 113 + hblank) * (speed + 2);
174 t_time = row_time * (height + vblank + 1);
175
176 frames_per_ms = core->xtal * 1000l;
177 do_div(frames_per_ms, t_time);
178 tmp = frames_per_ms;
179
180	v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clocks)\n",
181 tmp / 1000, tmp % 1000, t_time);
182}
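
Worked example with the defaults used elsewhere in this file (width = 640, height = 480, xtal = 27 MHz, CLK_SPEED = 0, hence hblank = 771 - 640 = 131 and vblank = 508 - 480 = 28): row_time = (640 + 113 + 131) * 2 = 1768 pixel clocks, t_time = 1768 * (480 + 28 + 1) = 899912, and 27,000,000 * 1000 / 899912 = 30002, so the debug line prints "30.002 fps".
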
183
184static void set_res(struct v4l2_subdev *sd)
185{
186 struct mt9v011 *core = to_mt9v011(sd);
187 unsigned vstart, hstart;
188
189	/*
190	 * The mt9v011 doesn't have scaling, so to select the desired
191	 * resolution we crop a window at the middle of the sensor.
192	 * hblank and vblank are adjusted so that the line timings for
193	 * 30 fps are preserved, no matter what resolution is selected.
194	 *
195	 * NOTE: the datasheet says that width (and height) should be
196	 * programmed as width-1. However, this doesn't work, since one
197	 * pixel per line would be missing.
198	 */
199
200 hstart = 14 + (640 - core->width) / 2;
201 mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
202 mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
203 mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);
204
205 vstart = 8 + (480 - core->height) / 2;
206 mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart);
207 mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height);
208 mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height);
209
210 calc_fps(sd);
211};
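
Because hblank and vblank are derived as 771 - width and 508 - height, the sums width + hblank and height + vblank stay constant for any crop, so the frame rate computed by calc_fps() is resolution-independent. For example, a 320x240 crop gives hstart = 14 + (640 - 320) / 2 = 174, hblank = 451 and vblank = 268; row_time and t_time are the same as for full VGA, so the ~30 fps timing is preserved.
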
212
213static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
214{
215 int i;
216
217 for (i = 0; i < ARRAY_SIZE(mt9v011_init_default); i++)
218 mt9v011_write(sd, mt9v011_init_default[i].reg,
219 mt9v011_init_default[i].value);
220
221 set_balance(sd);
222 set_res(sd);
223
224 return 0;
225};
226
227static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
228{
229 struct mt9v011 *core = to_mt9v011(sd);
230
231 v4l2_dbg(1, debug, sd, "g_ctrl called\n");
232
233 switch (ctrl->id) {
234 case V4L2_CID_GAIN:
235 ctrl->value = core->global_gain;
236 return 0;
237 case V4L2_CID_RED_BALANCE:
238 ctrl->value = core->red_bal;
239 return 0;
240 case V4L2_CID_BLUE_BALANCE:
241 ctrl->value = core->blue_bal;
242 return 0;
243 }
244 return -EINVAL;
245}
246
247static int mt9v011_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
248{
249 int i;
250
251 v4l2_dbg(1, debug, sd, "queryctrl called\n");
252
253 for (i = 0; i < ARRAY_SIZE(mt9v011_qctrl); i++)
254 if (qc->id && qc->id == mt9v011_qctrl[i].id) {
255 memcpy(qc, &(mt9v011_qctrl[i]),
256 sizeof(*qc));
257 return 0;
258 }
259
260 return -EINVAL;
261}
262
263
264static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
265{
266 struct mt9v011 *core = to_mt9v011(sd);
267 u8 i, n;
268 n = ARRAY_SIZE(mt9v011_qctrl);
269
270 for (i = 0; i < n; i++) {
271 if (ctrl->id != mt9v011_qctrl[i].id)
272 continue;
273 if (ctrl->value < mt9v011_qctrl[i].minimum ||
274 ctrl->value > mt9v011_qctrl[i].maximum)
275 return -ERANGE;
276 v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n",
277 ctrl->id, ctrl->value);
278 break;
279 }
280
281 switch (ctrl->id) {
282 case V4L2_CID_GAIN:
283 core->global_gain = ctrl->value;
284 break;
285 case V4L2_CID_RED_BALANCE:
286 core->red_bal = ctrl->value;
287 break;
288 case V4L2_CID_BLUE_BALANCE:
289 core->blue_bal = ctrl->value;
290 break;
291 default:
292 return -EINVAL;
293 }
294
295 set_balance(sd);
296
297 return 0;
298}
299
300static int mt9v011_enum_fmt(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt)
301{
302 if (fmt->index > 0)
303 return -EINVAL;
304
305 fmt->flags = 0;
306 strcpy(fmt->description, "8 bpp Bayer GRGR..BGBG");
307 fmt->pixelformat = V4L2_PIX_FMT_SGRBG8;
308
309 return 0;
310}
311
312static int mt9v011_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
313{
314 struct v4l2_pix_format *pix = &fmt->fmt.pix;
315
316 if (pix->pixelformat != V4L2_PIX_FMT_SGRBG8)
317 return -EINVAL;
318
319 v4l_bound_align_image(&pix->width, 48, 639, 1,
320 &pix->height, 32, 480, 1, 0);
321
322 return 0;
323}
324
325static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
326{
327 struct v4l2_pix_format *pix = &fmt->fmt.pix;
328 struct mt9v011 *core = to_mt9v011(sd);
329 int rc;
330
331 rc = mt9v011_try_fmt(sd, fmt);
332 if (rc < 0)
333 return -EINVAL;
334
335 core->width = pix->width;
336 core->height = pix->height;
337
338 set_res(sd);
339
340 return 0;
341}
342
343static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data)
344{
345 struct mt9v011 *core = to_mt9v011(sd);
346 unsigned *xtal = data;
347
348 v4l2_dbg(1, debug, sd, "s_config called\n");
349
350 if (xtal) {
351 core->xtal = *xtal;
352 v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
353 *xtal / 1000000, (*xtal / 1000) % 1000);
354 }
355
356 return 0;
357}
358
359
360#ifdef CONFIG_VIDEO_ADV_DEBUG
361static int mt9v011_g_register(struct v4l2_subdev *sd,
362 struct v4l2_dbg_register *reg)
363{
364 struct i2c_client *client = v4l2_get_subdevdata(sd);
365
366 if (!v4l2_chip_match_i2c_client(client, &reg->match))
367 return -EINVAL;
368 if (!capable(CAP_SYS_ADMIN))
369 return -EPERM;
370
371 reg->val = mt9v011_read(sd, reg->reg & 0xff);
372 reg->size = 2;
373
374 return 0;
375}
376
377static int mt9v011_s_register(struct v4l2_subdev *sd,
378 struct v4l2_dbg_register *reg)
379{
380 struct i2c_client *client = v4l2_get_subdevdata(sd);
381
382 if (!v4l2_chip_match_i2c_client(client, &reg->match))
383 return -EINVAL;
384 if (!capable(CAP_SYS_ADMIN))
385 return -EPERM;
386
387 mt9v011_write(sd, reg->reg & 0xff, reg->val & 0xffff);
388
389 return 0;
390}
391#endif
392
393static int mt9v011_g_chip_ident(struct v4l2_subdev *sd,
394 struct v4l2_dbg_chip_ident *chip)
395{
396 struct i2c_client *client = v4l2_get_subdevdata(sd);
397
398 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011,
399 MT9V011_VERSION);
400}
401
402static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
403 .queryctrl = mt9v011_queryctrl,
404 .g_ctrl = mt9v011_g_ctrl,
405 .s_ctrl = mt9v011_s_ctrl,
406 .reset = mt9v011_reset,
407 .s_config = mt9v011_s_config,
408 .g_chip_ident = mt9v011_g_chip_ident,
409#ifdef CONFIG_VIDEO_ADV_DEBUG
410 .g_register = mt9v011_g_register,
411 .s_register = mt9v011_s_register,
412#endif
413};
414
415static const struct v4l2_subdev_video_ops mt9v011_video_ops = {
416 .enum_fmt = mt9v011_enum_fmt,
417 .try_fmt = mt9v011_try_fmt,
418 .s_fmt = mt9v011_s_fmt,
419};
420
421static const struct v4l2_subdev_ops mt9v011_ops = {
422 .core = &mt9v011_core_ops,
423 .video = &mt9v011_video_ops,
424};
425
426
427/****************************************************************************
428 I2C Client & Driver
429 ****************************************************************************/
430
431static int mt9v011_probe(struct i2c_client *c,
432 const struct i2c_device_id *id)
433{
434 u16 version;
435 struct mt9v011 *core;
436 struct v4l2_subdev *sd;
437
438 /* Check if the adapter supports the needed features */
439 if (!i2c_check_functionality(c->adapter,
440 I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
441 return -EIO;
442
443 core = kzalloc(sizeof(struct mt9v011), GFP_KERNEL);
444 if (!core)
445 return -ENOMEM;
446
447 sd = &core->sd;
448 v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
449
450 /* Check if the sensor is really a MT9V011 */
451 version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
452 if (version != MT9V011_VERSION) {
453		v4l2_info(sd, "*** unknown Micron chip detected (0x%04x).\n",
454 version);
455 kfree(core);
456 return -EINVAL;
457 }
458
459 core->global_gain = 0x0024;
460 core->width = 640;
461 core->height = 480;
462 core->xtal = 27000000; /* Hz */
463
464 v4l_info(c, "chip found @ 0x%02x (%s)\n",
465 c->addr << 1, c->adapter->name);
466
467 return 0;
468}
469
470static int mt9v011_remove(struct i2c_client *c)
471{
472 struct v4l2_subdev *sd = i2c_get_clientdata(c);
473
474 v4l2_dbg(1, debug, sd,
475 "mt9v011.c: removing mt9v011 adapter on address 0x%x\n",
476 c->addr << 1);
477
478 v4l2_device_unregister_subdev(sd);
479 kfree(to_mt9v011(sd));
480 return 0;
481}
482
483/* ----------------------------------------------------------------------- */
484
485static const struct i2c_device_id mt9v011_id[] = {
486 { "mt9v011", 0 },
487 { }
488};
489MODULE_DEVICE_TABLE(i2c, mt9v011_id);
490
491static struct v4l2_i2c_driver_data v4l2_i2c_data = {
492 .name = "mt9v011",
493 .probe = mt9v011_probe,
494 .remove = mt9v011_remove,
495 .id_table = mt9v011_id,
496};
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
new file mode 100644
index 000000000000..9e443ee30558
--- /dev/null
+++ b/drivers/media/video/mt9v011.h
@@ -0,0 +1,35 @@
1/*
2 * mt9v011 - Micron 1/4-Inch VGA Digital Image Sensor
3 *
4 * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
5 * This code is placed under the terms of the GNU General Public License v2
6 */
7
8#ifndef MT9V011_H_
9#define MT9V011_H_
10
11#define R00_MT9V011_CHIP_VERSION 0x00
12#define R01_MT9V011_ROWSTART 0x01
13#define R02_MT9V011_COLSTART 0x02
14#define R03_MT9V011_HEIGHT 0x03
15#define R04_MT9V011_WIDTH 0x04
16#define R05_MT9V011_HBLANK 0x05
17#define R06_MT9V011_VBLANK 0x06
18#define R07_MT9V011_OUT_CTRL 0x07
19#define R09_MT9V011_SHUTTER_WIDTH 0x09
20#define R0A_MT9V011_CLK_SPEED 0x0a
21#define R0B_MT9V011_RESTART 0x0b
22#define R0C_MT9V011_SHUTTER_DELAY 0x0c
23#define R0D_MT9V011_RESET 0x0d
24#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
25#define R20_MT9V011_READ_MODE 0x20
26#define R2B_MT9V011_GREEN_1_GAIN 0x2b
27#define R2C_MT9V011_BLUE_GAIN 0x2c
28#define R2D_MT9V011_RED_GAIN 0x2d
29#define R2E_MT9V011_GREEN_2_GAIN 0x2e
30#define R35_MT9V011_GLOBAL_GAIN 0x35
31#define RF1_MT9V011_CHIP_ENABLE 0xf1
32
33#define MT9V011_VERSION 0x8243
34
35#endif
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index db25c3034c11..8d17cf613306 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -62,6 +62,7 @@
62#include <linux/module.h> 62#include <linux/module.h>
63#include <linux/poll.h> 63#include <linux/poll.h>
64#include <linux/slab.h> 64#include <linux/slab.h>
65#include <linux/smp_lock.h>
65#ifdef CONFIG_USB_PWC_INPUT_EVDEV 66#ifdef CONFIG_USB_PWC_INPUT_EVDEV
66#include <linux/usb/input.h> 67#include <linux/usb/input.h>
67#endif 68#endif
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 0be6f814f539..0b658dee05a4 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -29,7 +29,6 @@
29#include <linux/usb.h> 29#include <linux/usb.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/wait.h> 31#include <linux/wait.h>
32#include <linux/smp_lock.h>
33#include <linux/version.h> 32#include <linux/version.h>
34#include <linux/mutex.h> 33#include <linux/mutex.h>
35#include <linux/mm.h> 34#include <linux/mm.h>
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 6be845ccc7d7..9e3262c0ba37 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -48,6 +48,7 @@
48#include <linux/videodev2.h> 48#include <linux/videodev2.h>
49#include <linux/version.h> 49#include <linux/version.h>
50#include <linux/mm.h> 50#include <linux/mm.h>
51#include <linux/smp_lock.h>
51#include <media/videobuf-vmalloc.h> 52#include <media/videobuf-vmalloc.h>
52#include <media/v4l2-common.h> 53#include <media/v4l2-common.h>
53#include <media/v4l2-ioctl.h> 54#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
index 155804b061e9..b624a4c01fdc 100644
--- a/drivers/media/video/saa5246a.c
+++ b/drivers/media/video/saa5246a.c
@@ -43,7 +43,6 @@
43#include <linux/mm.h> 43#include <linux/mm.h>
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/i2c.h> 45#include <linux/i2c.h>
46#include <linux/smp_lock.h>
47#include <linux/mutex.h> 46#include <linux/mutex.h>
48#include <linux/videotext.h> 47#include <linux/videotext.h>
49#include <linux/videodev2.h> 48#include <linux/videodev2.h>
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
index 271d6e931b75..12835fb82c95 100644
--- a/drivers/media/video/saa5249.c
+++ b/drivers/media/video/saa5249.c
@@ -46,7 +46,6 @@
46#include <linux/mm.h> 46#include <linux/mm.h>
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/i2c.h> 48#include <linux/i2c.h>
49#include <linux/smp_lock.h>
50#include <linux/mutex.h> 49#include <linux/mutex.h>
51#include <linux/delay.h> 50#include <linux/delay.h>
52#include <linux/videotext.h> 51#include <linux/videotext.h>
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index add1757f8930..296788c3bf0e 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/smp_lock.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26 27
27#include "saa7134-reg.h" 28#include "saa7134-reg.h"
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index c8f05297d0f0..85ffc2cba039 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -31,6 +31,7 @@ static const char version[] = "0.24";
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/smp_lock.h>
34#include <linux/pagemap.h> 35#include <linux/pagemap.h>
35#include <linux/usb.h> 36#include <linux/usb.h>
36#include "se401.h" 37#include "se401.h"
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 16f595d4337a..9f5ae8167855 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -237,11 +237,11 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
237 return -ENOMEM; 237 return -ENOMEM;
238 238
239 icd->num_user_formats = fmts; 239 icd->num_user_formats = fmts;
240 fmts = 0;
241 240
242 dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts); 241 dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts);
243 242
244 /* Second pass - actually fill data formats */ 243 /* Second pass - actually fill data formats */
244 fmts = 0;
245 for (i = 0; i < icd->num_formats; i++) 245 for (i = 0; i < icd->num_formats; i++)
246 if (!ici->ops->get_formats) { 246 if (!ici->ops->get_formats) {
247 icd->user_formats[i].host_fmt = icd->formats + i; 247 icd->user_formats[i].host_fmt = icd->formats + i;
@@ -877,8 +877,11 @@ static int soc_camera_probe(struct device *dev)
877 (unsigned short)~0; 877 (unsigned short)~0;
878 878
879 ret = soc_camera_init_user_formats(icd); 879 ret = soc_camera_init_user_formats(icd);
880 if (ret < 0) 880 if (ret < 0) {
881 if (icd->ops->remove)
882 icd->ops->remove(icd);
881 goto eiufmt; 883 goto eiufmt;
884 }
882 885
883 icd->height = DEFAULT_HEIGHT; 886 icd->height = DEFAULT_HEIGHT;
884 icd->width = DEFAULT_WIDTH; 887 icd->width = DEFAULT_WIDTH;
@@ -902,8 +905,10 @@ static int soc_camera_remove(struct device *dev)
902{ 905{
903 struct soc_camera_device *icd = to_soc_camera_dev(dev); 906 struct soc_camera_device *icd = to_soc_camera_dev(dev);
904 907
908 mutex_lock(&icd->video_lock);
905 if (icd->ops->remove) 909 if (icd->ops->remove)
906 icd->ops->remove(icd); 910 icd->ops->remove(icd);
911 mutex_unlock(&icd->video_lock);
907 912
908 soc_camera_free_user_formats(icd); 913 soc_camera_free_user_formats(icd);
909 914
@@ -1145,6 +1150,7 @@ evidallocd:
1145} 1150}
1146EXPORT_SYMBOL(soc_camera_video_start); 1151EXPORT_SYMBOL(soc_camera_video_start);
1147 1152
1153/* Called from client .remove() methods with .video_lock held */
1148void soc_camera_video_stop(struct soc_camera_device *icd) 1154void soc_camera_video_stop(struct soc_camera_device *icd)
1149{ 1155{
1150 struct video_device *vdev = icd->vdev; 1156 struct video_device *vdev = icd->vdev;
@@ -1154,10 +1160,8 @@ void soc_camera_video_stop(struct soc_camera_device *icd)
1154 if (!icd->dev.parent || !vdev) 1160 if (!icd->dev.parent || !vdev)
1155 return; 1161 return;
1156 1162
1157 mutex_lock(&icd->video_lock);
1158 video_unregister_device(vdev); 1163 video_unregister_device(vdev);
1159 icd->vdev = NULL; 1164 icd->vdev = NULL;
1160 mutex_unlock(&icd->video_lock);
1161} 1165}
1162EXPORT_SYMBOL(soc_camera_video_stop); 1166EXPORT_SYMBOL(soc_camera_video_stop);
1163 1167
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 2e5937047278..4d6785e63455 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -27,6 +27,7 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/smp_lock.h>
30 31
31#include <linux/usb.h> 32#include <linux/usb.h>
32#include <linux/mm.h> 33#include <linux/mm.h>
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index 0eb313082c97..eaada39c76fd 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/major.h> 27#include <linux/major.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/smp_lock.h>
29#include <linux/mm.h> 30#include <linux/mm.h>
30#include <linux/init.h> 31#include <linux/init.h>
31#include <linux/poll.h> 32#include <linux/poll.h>
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 75f286f7a2e9..8b4e7dafce7b 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -62,6 +62,7 @@
62#include <linux/init.h> 62#include <linux/init.h>
63#include <linux/vmalloc.h> 63#include <linux/vmalloc.h>
64#include <linux/slab.h> 64#include <linux/slab.h>
65#include <linux/smp_lock.h>
65#include <linux/pagemap.h> 66#include <linux/pagemap.h>
66#include <linux/errno.h> 67#include <linux/errno.h>
67#include <linux/videodev.h> 68#include <linux/videodev.h>
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 8d73979596f9..45fce39ec9ad 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -43,6 +43,7 @@
43#include <linux/vmalloc.h> 43#include <linux/vmalloc.h>
44#include <linux/mm.h> 44#include <linux/mm.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/smp_lock.h>
46#include <linux/mutex.h> 47#include <linux/mutex.h>
47#include <linux/firmware.h> 48#include <linux/firmware.h>
48#include <linux/ihex.h> 49#include <linux/ihex.h>
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 90b58914f984..90d9b5c0e9a7 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -50,6 +50,7 @@
50#include <linux/list.h> 50#include <linux/list.h>
51#include <linux/timer.h> 51#include <linux/timer.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/smp_lock.h>
53#include <linux/mm.h> 54#include <linux/mm.h>
54#include <linux/utsname.h> 55#include <linux/utsname.h>
55#include <linux/highmem.h> 56#include <linux/highmem.h>
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 31eac66411d7..a7f1b69a7dab 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -25,7 +25,6 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/kmod.h> 26#include <linux/kmod.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/smp_lock.h>
29#include <asm/uaccess.h> 28#include <asm/uaccess.h>
30#include <asm/system.h> 29#include <asm/system.h>
31 30
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index cd7266858462..7705fc6baf00 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -343,6 +343,53 @@ static struct bar_std bars[] = {
343#define TO_U(r, g, b) \ 343#define TO_U(r, g, b) \
344 (((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128) 344 (((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
345 345
346/* precalculate color bar values to speed up rendering */
347static void precalculate_bars(struct vivi_fh *fh)
348{
349 struct vivi_dev *dev = fh->dev;
350 unsigned char r, g, b;
351 int k, is_yuv;
352
353 fh->input = dev->input;
354
355 for (k = 0; k < 8; k++) {
356 r = bars[fh->input].bar[k][0];
357 g = bars[fh->input].bar[k][1];
358 b = bars[fh->input].bar[k][2];
359 is_yuv = 0;
360
361 switch (fh->fmt->fourcc) {
362 case V4L2_PIX_FMT_YUYV:
363 case V4L2_PIX_FMT_UYVY:
364 is_yuv = 1;
365 break;
366 case V4L2_PIX_FMT_RGB565:
367 case V4L2_PIX_FMT_RGB565X:
368 r >>= 3;
369 g >>= 2;
370 b >>= 3;
371 break;
372 case V4L2_PIX_FMT_RGB555:
373 case V4L2_PIX_FMT_RGB555X:
374 r >>= 3;
375 g >>= 3;
376 b >>= 3;
377 break;
378 }
379
380 if (is_yuv) {
381 fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
382 fh->bars[k][1] = TO_U(r, g, b); /* Cb */
383 fh->bars[k][2] = TO_V(r, g, b); /* Cr */
384 } else {
385 fh->bars[k][0] = r;
386 fh->bars[k][1] = g;
387 fh->bars[k][2] = b;
388 }
389 }
390
391}
392
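
A quick sanity check of the fixed-point conversion used here: for any grey level r = g = b the TO_U() coefficients cancel out (-9714 - 19070 + 28784 = 0), so U = (32768 >> 16) + 128 = 128, i.e. zero chroma, which is exactly what a grey bar should produce.
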
346#define TSTAMP_MIN_Y 24 393#define TSTAMP_MIN_Y 24
347#define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15) 394#define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15)
348#define TSTAMP_INPUT_X 10 395#define TSTAMP_INPUT_X 10
@@ -755,6 +802,8 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
755 buf->vb.height = fh->height; 802 buf->vb.height = fh->height;
756 buf->vb.field = field; 803 buf->vb.field = field;
757 804
805 precalculate_bars(fh);
806
758 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { 807 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
759 rc = videobuf_iolock(vq, &buf->vb, NULL); 808 rc = videobuf_iolock(vq, &buf->vb, NULL);
760 if (rc < 0) 809 if (rc < 0)
@@ -893,53 +942,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
893 return 0; 942 return 0;
894} 943}
895 944
896/* precalculate color bar values to speed up rendering */
897static void precalculate_bars(struct vivi_fh *fh)
898{
899 struct vivi_dev *dev = fh->dev;
900 unsigned char r, g, b;
901 int k, is_yuv;
902
903 fh->input = dev->input;
904
905 for (k = 0; k < 8; k++) {
906 r = bars[fh->input].bar[k][0];
907 g = bars[fh->input].bar[k][1];
908 b = bars[fh->input].bar[k][2];
909 is_yuv = 0;
910
911 switch (fh->fmt->fourcc) {
912 case V4L2_PIX_FMT_YUYV:
913 case V4L2_PIX_FMT_UYVY:
914 is_yuv = 1;
915 break;
916 case V4L2_PIX_FMT_RGB565:
917 case V4L2_PIX_FMT_RGB565X:
918 r >>= 3;
919 g >>= 2;
920 b >>= 3;
921 break;
922 case V4L2_PIX_FMT_RGB555:
923 case V4L2_PIX_FMT_RGB555X:
924 r >>= 3;
925 g >>= 3;
926 b >>= 3;
927 break;
928 }
929
930 if (is_yuv) {
931 fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
932 fh->bars[k][1] = TO_U(r, g, b); /* Cb */
933 fh->bars[k][2] = TO_V(r, g, b); /* Cr */
934 } else {
935 fh->bars[k][0] = r;
936 fh->bars[k][1] = g;
937 fh->bars[k][2] = b;
938 }
939 }
940
941}
942
943/*FIXME: This seems to be generic enough to be at videodev2 */ 945/*FIXME: This seems to be generic enough to be at videodev2 */
944static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 946static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
945 struct v4l2_format *f) 947 struct v4l2_format *f)
@@ -965,8 +967,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
965 fh->vb_vidq.field = f->fmt.pix.field; 967 fh->vb_vidq.field = f->fmt.pix.field;
966 fh->type = f->type; 968 fh->type = f->type;
967 969
968 precalculate_bars(fh);
969
970 ret = 0; 970 ret = 0;
971out: 971out:
972 mutex_unlock(&q->vb_lock); 972 mutex_unlock(&q->vb_lock);
@@ -1357,6 +1357,7 @@ static int __init vivi_create_instance(int inst)
1357 goto unreg_dev; 1357 goto unreg_dev;
1358 1358
1359 *vfd = vivi_template; 1359 *vfd = vivi_template;
1360 vfd->debug = debug;
1360 1361
1361 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr); 1362 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
1362 if (ret < 0) 1363 if (ret < 0)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 3d7df32a3d87..bcdefb1bcb3d 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -49,6 +49,7 @@
49#include <linux/module.h> 49#include <linux/module.h>
50#include <linux/delay.h> 50#include <linux/delay.h>
51#include <linux/slab.h> 51#include <linux/slab.h>
52#include <linux/smp_lock.h>
52#include <linux/pci.h> 53#include <linux/pci.h>
53#include <linux/vmalloc.h> 54#include <linux/vmalloc.h>
54#include <linux/wait.h> 55#include <linux/wait.h>
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 7ac12cb0be4a..5b6e58a3ba46 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -32,8 +32,7 @@
32 * This driver was tested with firmware revision A4. 32 * This driver was tested with firmware revision A4.
33 */ 33 */
34 34
35#if defined(CONFIG_KEYBOARD_DM355EVM) \ 35#if defined(CONFIG_INPUT_DM355EVM) || defined(CONFIG_INPUT_DM355EVM_MODULE)
36 || defined(CONFIG_KEYBOARD_DM355EVM_MODULE)
37#define msp_has_keyboard() true 36#define msp_has_keyboard() true
38#else 37#else
39#define msp_has_keyboard() false 38#define msp_has_keyboard() false
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 671a7efe86a8..c1de4afa89a6 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -238,8 +238,10 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
238 mutex_lock(&pcap->adc_mutex); 238 mutex_lock(&pcap->adc_mutex);
239 req = pcap->adc_queue[pcap->adc_head]; 239 req = pcap->adc_queue[pcap->adc_head];
240 240
241 if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) 241 if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
242 mutex_unlock(&pcap->adc_mutex);
242 return IRQ_HANDLED; 243 return IRQ_HANDLED;
244 }
243 245
244 /* read requested channels results */ 246 /* read requested channels results */
245 ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp); 247 ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 4c7b7962f6b8..0cc5eeff5ee8 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -367,7 +367,8 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
367 break; 367 break;
368 368
369 default: 369 default:
370 return -1; 370 gate = -1;
371 goto already;
371 } 372 }
372 373
373 writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); 374 writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index bae61b22501c..7d430835655f 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -180,14 +180,9 @@ static struct completion irq_event;
180static int twl4030_irq_thread(void *data) 180static int twl4030_irq_thread(void *data)
181{ 181{
182 long irq = (long)data; 182 long irq = (long)data;
183 struct irq_desc *desc = irq_to_desc(irq);
184 static unsigned i2c_errors; 183 static unsigned i2c_errors;
185 static const unsigned max_i2c_errors = 100; 184 static const unsigned max_i2c_errors = 100;
186 185
187 if (!desc) {
188 pr_err("twl4030: Invalid IRQ: %ld\n", irq);
189 return -EINVAL;
190 }
191 186
192 current->flags |= PF_NOFREEZE; 187 current->flags |= PF_NOFREEZE;
193 188
@@ -240,7 +235,7 @@ static int twl4030_irq_thread(void *data)
240 } 235 }
241 local_irq_enable(); 236 local_irq_enable();
242 237
243 desc->chip->unmask(irq); 238 enable_irq(irq);
244 } 239 }
245 240
246 return 0; 241 return 0;
@@ -255,25 +250,13 @@ static int twl4030_irq_thread(void *data)
255 * thread. All we do here is acknowledge and mask the interrupt and wakeup 250 * thread. All we do here is acknowledge and mask the interrupt and wakeup
256 * the kernel thread. 251 * the kernel thread.
257 */ 252 */
258static void handle_twl4030_pih(unsigned int irq, struct irq_desc *desc) 253static irqreturn_t handle_twl4030_pih(int irq, void *devid)
259{ 254{
260 /* Acknowledge, clear *AND* mask the interrupt... */ 255 /* Acknowledge, clear *AND* mask the interrupt... */
261 desc->chip->ack(irq); 256 disable_irq_nosync(irq);
262 complete(&irq_event); 257 complete(devid);
263} 258 return IRQ_HANDLED;
264
265static struct task_struct *start_twl4030_irq_thread(long irq)
266{
267 struct task_struct *thread;
268
269 init_completion(&irq_event);
270 thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
271 if (!thread)
272 pr_err("twl4030: could not create irq %ld thread!\n", irq);
273
274 return thread;
275} 259}
276
277/*----------------------------------------------------------------------*/ 260/*----------------------------------------------------------------------*/
278 261
279/* 262/*
@@ -734,18 +717,28 @@ int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
734 } 717 }
735 718
736 /* install an irq handler to demultiplex the TWL4030 interrupt */ 719 /* install an irq handler to demultiplex the TWL4030 interrupt */
737 task = start_twl4030_irq_thread(irq_num);
738 if (!task) {
739 pr_err("twl4030: irq thread FAIL\n");
740 status = -ESRCH;
741 goto fail;
742 }
743 720
744 set_irq_data(irq_num, task);
745 set_irq_chained_handler(irq_num, handle_twl4030_pih);
746 721
747 return status; 722 init_completion(&irq_event);
748 723
724 status = request_irq(irq_num, handle_twl4030_pih, IRQF_DISABLED,
725 "TWL4030-PIH", &irq_event);
726 if (status < 0) {
727 pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
728 goto fail_rqirq;
729 }
730
731 task = kthread_run(twl4030_irq_thread, (void *)irq_num, "twl4030-irq");
732 if (IS_ERR(task)) {
733 pr_err("twl4030: could not create irq %d thread!\n", irq_num);
734 status = PTR_ERR(task);
735 goto fail_kthread;
736 }
737 return status;
738fail_kthread:
739 free_irq(irq_num, &irq_event);
740fail_rqirq:
741 /* clean up twl4030_sih_setup */
749fail: 742fail:
750 for (i = irq_base; i < irq_end; i++) 743 for (i = irq_base; i < irq_end; i++)
751 set_irq_chip_and_handler(i, NULL, NULL); 744 set_irq_chip_and_handler(i, NULL, NULL);
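
The twl4030-irq rework above replaces the chained handler and hand-rolled thread bootstrap with a plain request_irq(): the hard handler only masks the line and signals a completion, and the existing thread does the slow I2C work before re-enabling the interrupt. A stripped-down sketch of that handoff pattern, with hypothetical names rather than the driver's own code:

	#include <linux/interrupt.h>
	#include <linux/kthread.h>
	#include <linux/completion.h>

	static DECLARE_COMPLETION(demo_event);

	/* hard IRQ context: mask the line and kick the thread */
	static irqreturn_t demo_hardirq(int irq, void *devid)
	{
		disable_irq_nosync(irq);
		complete(devid);		/* devid is &demo_event */
		return IRQ_HANDLED;
	}

	/* thread context: do the slow bus traffic, then unmask */
	static int demo_thread(void *data)
	{
		long irq = (long)data;

		while (!kthread_should_stop()) {
			wait_for_completion_interruptible(&demo_event);
			/* ... read the chip's status over I2C and dispatch ... */
			enable_irq(irq);
		}
		return 0;
	}
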
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index d38a7acdb6ec..d019746551f3 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -114,7 +114,6 @@ static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
114 if (!left) 114 if (!left)
115 return; 115 return;
116 addr += len; 116 addr += len;
117 flush_kernel_dcache_page(miter->page);
118 } while (sg_dwiter_next(miter)); 117 } while (sg_dwiter_next(miter));
119} 118}
120 119
@@ -142,9 +141,6 @@ void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t da
142 return; 141 return;
143 } else 142 } else
144 sg_dwiter_write_slow(miter, data); 143 sg_dwiter_write_slow(miter, data);
145
146 if (miter->length == miter->consumed)
147 flush_kernel_dcache_page(miter->page);
148} 144}
149EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block); 145EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
150 146
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index b34cb5f79eea..2e535a0ccd5e 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -173,6 +173,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
173 unsigned segment; 173 unsigned segment;
174 unsigned offset = (unsigned) off; 174 unsigned offset = (unsigned) off;
175 u8 *cp = bounce + 1; 175 u8 *cp = bounce + 1;
176 int sr;
176 177
177 *cp = AT25_WREN; 178 *cp = AT25_WREN;
178 status = spi_write(at25->spi, cp, 1); 179 status = spi_write(at25->spi, cp, 1);
@@ -214,7 +215,6 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
214 timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); 215 timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
215 retries = 0; 216 retries = 0;
216 do { 217 do {
217 int sr;
218 218
219 sr = spi_w8r8(at25->spi, AT25_RDSR); 219 sr = spi_w8r8(at25->spi, AT25_RDSR);
220 if (sr < 0 || (sr & AT25_SR_nRDY)) { 220 if (sr < 0 || (sr & AT25_SR_nRDY)) {
@@ -228,7 +228,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
228 break; 228 break;
229 } while (retries++ < 3 || time_before_eq(jiffies, timeout)); 229 } while (retries++ < 3 || time_before_eq(jiffies, timeout));
230 230
231 if (time_after(jiffies, timeout)) { 231 if ((sr < 0) || (sr & AT25_SR_nRDY)) {
232 dev_err(&at25->spi->dev, 232 dev_err(&at25->spi->dev,
233 "write %d bytes offset %d, " 233 "write %d bytes offset %d, "
234 "timeout after %u msecs\n", 234 "timeout after %u msecs\n",
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index fa2d93a9fb8d..aed609832bc2 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -29,7 +29,6 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/mm.h> 30#include <linux/mm.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/smp_lock.h>
33#include <linux/spinlock.h> 32#include <linux/spinlock.h>
34#include <linux/device.h> 33#include <linux/device.h>
35#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index eedbf9c32760..79689b10f937 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -24,7 +24,6 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/smp_lock.h>
28#include <linux/spinlock.h> 27#include <linux/spinlock.h>
29#include <linux/device.h> 28#include <linux/device.h>
30#include <linux/miscdevice.h> 29#include <linux/miscdevice.h>
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 8d1c60a3f0df..5d778ec8cdb2 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -235,7 +235,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
235 skb->ip_summed = CHECKSUM_UNNECESSARY; 235 skb->ip_summed = CHECKSUM_UNNECESSARY;
236 236
237 dev_dbg(xpnet, "passing skb to network layer\n" 237 dev_dbg(xpnet, "passing skb to network layer\n"
238 KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " 238 "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
239 "skb->end=0x%p skb->len=%d\n", 239 "skb->end=0x%p skb->len=%d\n",
240 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), 240 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
241 skb_end_pointer(skb), skb->len); 241 skb_end_pointer(skb), skb->len);
@@ -399,7 +399,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
399 msg->buf_pa = xp_pa((void *)start_addr); 399 msg->buf_pa = xp_pa((void *)start_addr);
400 400
401 dev_dbg(xpnet, "sending XPC message to %d:%d\n" 401 dev_dbg(xpnet, "sending XPC message to %d:%d\n"
402 KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " 402 "msg->buf_pa=0x%lx, msg->size=%u, "
403 "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", 403 "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
404 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, 404 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
405 msg->leadin_ignore, msg->tailout_ignore); 405 msg->leadin_ignore, msg->tailout_ignore);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 11efefb1af51..4e72964a7b43 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -278,7 +278,7 @@ static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data)
278 if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8))) 278 if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)))
279 return -EINVAL; 279 return -EINVAL;
280 280
281 sg_miter_start(&miter, data->sg, data->sg_len, 0); 281 sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG);
282 282
283 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 283 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
284 15, CB710_MMC_C2_READ_PIO_SIZE_MASK); 284 15, CB710_MMC_C2_READ_PIO_SIZE_MASK);
@@ -307,7 +307,7 @@ static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data)
307 goto out; 307 goto out;
308 } 308 }
309out: 309out:
310 cb710_sg_miter_stop_writing(&miter); 310 sg_miter_stop(&miter);
311 return err; 311 return err;
312} 312}
313 313
@@ -322,7 +322,7 @@ static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data)
322 if (unlikely(data->blocks > 1 && data->blksz & 15)) 322 if (unlikely(data->blocks > 1 && data->blksz & 15))
323 return -EINVAL; 323 return -EINVAL;
324 324
325 sg_miter_start(&miter, data->sg, data->sg_len, 0); 325 sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG);
326 326
327 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 327 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
328 0, CB710_MMC_C2_READ_PIO_SIZE_MASK); 328 0, CB710_MMC_C2_READ_PIO_SIZE_MASK);
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index e0be21a4a696..bf98d7cc928a 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -652,7 +652,7 @@ static irqreturn_t imxmci_irq(int irq, void *devid)
652 set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); 652 set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
653 tasklet_schedule(&host->tasklet); 653 tasklet_schedule(&host->tasklet);
654 654
655 return IRQ_RETVAL(handled);; 655 return IRQ_RETVAL(handled);
656} 656}
657 657
658static void imxmci_tasklet_fnc(unsigned long data) 658static void imxmci_tasklet_fnc(unsigned long data)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 240608cc7ae9..a461017ce5ce 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi)
1313 struct mmc_spi_host *host; 1313 struct mmc_spi_host *host;
1314 int status; 1314 int status;
1315 1315
1316 /* We rely on full duplex transfers, mostly to reduce
1317 * per-transfer overheads (by making fewer transfers).
1318 */
1319 if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
1320 return -EINVAL;
1321
1316 /* MMC and SD specs only seem to care that sampling is on the 1322 /* MMC and SD specs only seem to care that sampling is on the
1317 * rising edge ... meaning SPI modes 0 or 3. So either SPI mode 1323 * rising edge ... meaning SPI modes 0 or 3. So either SPI mode
1318 * should be legit. We'll use mode 0 since the steady state is 0, 1324 * should be legit. We'll use mode 0 since the steady state is 0,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index b56d72ff06e9..34e23489811a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -384,7 +384,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
384 u16 val[2] = {0, 0}; 384 u16 val[2] = {0, 0};
385 val[0] = mvsd_read(MVSD_FIFO); 385 val[0] = mvsd_read(MVSD_FIFO);
386 val[1] = mvsd_read(MVSD_FIFO); 386 val[1] = mvsd_read(MVSD_FIFO);
387 memcpy(p, &val, s); 387 memcpy(p, ((void *)&val) + 4 - s, s);
388 s = 0; 388 s = 0;
389 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); 389 intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
390 } 390 }
@@ -423,7 +423,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
423 if (s < 4) { 423 if (s < 4) {
424 if (s && (intr_status & MVSD_NOR_TX_AVAIL)) { 424 if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
425 u16 val[2] = {0, 0}; 425 u16 val[2] = {0, 0};
426 memcpy(&val, p, s); 426 memcpy(((void *)&val) + 4 - s, p, s);
427 mvsd_write(MVSD_FIFO, val[0]); 427 mvsd_write(MVSD_FIFO, val[0]);
428 mvsd_write(MVSD_FIFO, val[1]); 428 mvsd_write(MVSD_FIFO, val[1]);
429 s = 0; 429 s = 0;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d7d7109ef47e..e55ac792d68c 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -168,12 +168,12 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
168 168
169 if (data->flags & MMC_DATA_READ) { 169 if (data->flags & MMC_DATA_READ) {
170 host->dma_dir = DMA_FROM_DEVICE; 170 host->dma_dir = DMA_FROM_DEVICE;
171 dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG; 171 dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
172 DRCMR(host->dma_drcmrtx) = 0; 172 DRCMR(host->dma_drcmrtx) = 0;
173 DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD; 173 DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
174 } else { 174 } else {
175 host->dma_dir = DMA_TO_DEVICE; 175 host->dma_dir = DMA_TO_DEVICE;
176 dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC; 176 dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
177 DRCMR(host->dma_drcmrrx) = 0; 177 DRCMR(host->dma_drcmrrx) = 0;
178 DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD; 178 DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
179 } 179 }
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index d79fa55c3b89..1e8aa590bb39 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -158,6 +158,13 @@ static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
158 return of_host->clock; 158 return of_host->clock;
159} 159}
160 160
161static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
162{
163 struct sdhci_of_host *of_host = sdhci_priv(host);
164
165 return of_host->clock / 256 / 16;
166}
167
161static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host) 168static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host)
162{ 169{
163 struct sdhci_of_host *of_host = sdhci_priv(host); 170 struct sdhci_of_host *of_host = sdhci_priv(host);
@@ -184,6 +191,7 @@ static struct sdhci_of_data sdhci_esdhc = {
184 .set_clock = esdhc_set_clock, 191 .set_clock = esdhc_set_clock,
185 .enable_dma = esdhc_enable_dma, 192 .enable_dma = esdhc_enable_dma,
186 .get_max_clock = esdhc_get_max_clock, 193 .get_max_clock = esdhc_get_max_clock,
194 .get_min_clock = esdhc_get_min_clock,
187 .get_timeout_clock = esdhc_get_timeout_clock, 195 .get_timeout_clock = esdhc_get_timeout_clock,
188 }, 196 },
189}; 197};
@@ -226,7 +234,7 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
226 return -ENODEV; 234 return -ENODEV;
227 235
228 host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host)); 236 host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host));
229 if (!host) 237 if (IS_ERR(host))
230 return -ENOMEM; 238 return -ENOMEM;
231 239
232 of_host = sdhci_priv(host); 240 of_host = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6779b4ecab18..fc96f8cb9c0b 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -773,8 +773,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
773 } 773 }
774 774
775 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 775 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
776 sg_miter_start(&host->sg_miter, 776 int flags;
777 data->sg, data->sg_len, SG_MITER_ATOMIC); 777
778 flags = SG_MITER_ATOMIC;
779 if (host->data->flags & MMC_DATA_READ)
780 flags |= SG_MITER_TO_SG;
781 else
782 flags |= SG_MITER_FROM_SG;
783 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
778 host->blocks = data->blocks; 784 host->blocks = data->blocks;
779 } 785 }
780 786
@@ -1766,7 +1772,10 @@ int sdhci_add_host(struct sdhci_host *host)
1766 * Set host parameters. 1772 * Set host parameters.
1767 */ 1773 */
1768 mmc->ops = &sdhci_ops; 1774 mmc->ops = &sdhci_ops;
1769 mmc->f_min = host->max_clk / 256; 1775 if (host->ops->get_min_clock)
1776 mmc->f_min = host->ops->get_min_clock(host);
1777 else
1778 mmc->f_min = host->max_clk / 256;
1770 mmc->f_max = host->max_clk; 1779 mmc->f_max = host->max_clk;
1771 mmc->caps = MMC_CAP_SDIO_IRQ; 1780 mmc->caps = MMC_CAP_SDIO_IRQ;
1772 1781
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 831ddf7dcb49..c77e9ff30223 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -302,6 +302,7 @@ struct sdhci_ops {
302 302
303 int (*enable_dma)(struct sdhci_host *host); 303 int (*enable_dma)(struct sdhci_host *host);
304 unsigned int (*get_max_clock)(struct sdhci_host *host); 304 unsigned int (*get_max_clock)(struct sdhci_host *host);
305 unsigned int (*get_min_clock)(struct sdhci_host *host);
305 unsigned int (*get_timeout_clock)(struct sdhci_host *host); 306 unsigned int (*get_timeout_clock)(struct sdhci_host *host);
306}; 307};
307 308
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 5011fa73f918..1479da6d3aa6 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s,
194 parts[this_part].name = extra_mem; 194 parts[this_part].name = extra_mem;
195 extra_mem += name_len + 1; 195 extra_mem += name_len + 1;
196 196
197 dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n", 197 dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
198 this_part, 198 this_part,
199 parts[this_part].name, 199 parts[this_part].name,
200 parts[this_part].offset, 200 parts[this_part].offset,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 59c46126a5ce..ae5fe91867e1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -54,7 +54,7 @@
54#define SR_SRWD 0x80 /* SR write protect */ 54#define SR_SRWD 0x80 /* SR write protect */
55 55
56/* Define max times to check status register before we give up. */ 56/* Define max times to check status register before we give up. */
57#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */ 57#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
58#define CMD_SIZE 4 58#define CMD_SIZE 4
59 59
60#ifdef CONFIG_M25PXX_USE_FAST_READ 60#ifdef CONFIG_M25PXX_USE_FAST_READ
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 73f05227dc8c..d8cf29c01cc4 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
226 if (!desperate && inftl->numfreeEUNs < 2) { 226 if (!desperate && inftl->numfreeEUNs < 2) {
227 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " 227 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
228 "EUNs (%d)\n", inftl->numfreeEUNs); 228 "EUNs (%d)\n", inftl->numfreeEUNs);
229 return 0xffff; 229 return BLOCK_NIL;
230 } 230 }
231 231
232 /* Scan for a free block */ 232 /* Scan for a free block */
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
281 silly = MAX_LOOPS; 281 silly = MAX_LOOPS;
282 while (thisEUN < inftl->nb_blocks) { 282 while (thisEUN < inftl->nb_blocks) {
283 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { 283 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
284 if ((BlockMap[block] != 0xffff) || BlockDeleted[block]) 284 if ((BlockMap[block] != BLOCK_NIL) ||
285 BlockDeleted[block])
285 continue; 286 continue;
286 287
287 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) 288 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
525 if (!silly--) { 526 if (!silly--) {
526 printk(KERN_WARNING "INFTL: infinite loop in " 527 printk(KERN_WARNING "INFTL: infinite loop in "
527 "Virtual Unit Chain 0x%x\n", thisVUC); 528 "Virtual Unit Chain 0x%x\n", thisVUC);
528 return 0xffff; 529 return BLOCK_NIL;
529 } 530 }
530 531
531 /* Skip to next block in chain */ 532 /* Skip to next block in chain */
@@ -549,7 +550,7 @@ hitused:
549 * waiting to be picked up. We're going to have to fold 550 * waiting to be picked up. We're going to have to fold
550 * a chain to make room. 551 * a chain to make room.
551 */ 552 */
552 thisEUN = INFTL_makefreeblock(inftl, 0xffff); 553 thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
553 554
554 /* 555 /*
555 * Hopefully we free something, lets try again. 556 * Hopefully we free something, lets try again.
@@ -631,7 +632,7 @@ hitused:
631 632
632 printk(KERN_WARNING "INFTL: error folding to make room for Virtual " 633 printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
633 "Unit Chain 0x%x\n", thisVUC); 634 "Unit Chain 0x%x\n", thisVUC);
634 return 0xffff; 635 return BLOCK_NIL;
635} 636}
636 637
637/* 638/*
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 0b98654d8eed..7a58bd5522fd 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -284,13 +284,6 @@ config MTD_L440GX
284 284
285 BE VERY CAREFUL. 285 BE VERY CAREFUL.
286 286
287config MTD_SBC8240
288 tristate "Flash device on SBC8240"
289 depends on MTD_JEDECPROBE && 8260
290 help
291 Flash access on the SBC8240 board from Wind River. See
292 <http://www.windriver.com/products/sbc8240/>
293
294config MTD_TQM8XXL 287config MTD_TQM8XXL
295 tristate "CFI Flash device mapped on TQM8XXL" 288 tristate "CFI Flash device mapped on TQM8XXL"
296 depends on MTD_CFI && TQM8xxL 289 depends on MTD_CFI && TQM8xxL
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 8bae7f9850c0..5beb0662d724 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
50obj-$(CONFIG_MTD_NETtel) += nettel.o 50obj-$(CONFIG_MTD_NETtel) += nettel.o
51obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o 51obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
52obj-$(CONFIG_MTD_H720X) += h720x-flash.o 52obj-$(CONFIG_MTD_H720X) += h720x-flash.o
53obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
54obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o 53obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
55obj-$(CONFIG_MTD_IXP2000) += ixp2000.o 54obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
56obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o 55obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index b08a798ee254..2aac41bde8b3 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -42,10 +42,8 @@
42#include <mach/hardware.h> 42#include <mach/hardware.h>
43#include <asm/system.h> 43#include <asm/system.h>
44 44
45#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
46
47struct armflash_subdev_info { 45struct armflash_subdev_info {
48 char name[SUBDEV_NAME_SIZE]; 46 char *name;
49 struct mtd_info *mtd; 47 struct mtd_info *mtd;
50 struct map_info map; 48 struct map_info map;
51 struct flash_platform_data *plat; 49 struct flash_platform_data *plat;
@@ -134,6 +132,8 @@ static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
134 map_destroy(subdev->mtd); 132 map_destroy(subdev->mtd);
135 if (subdev->map.virt) 133 if (subdev->map.virt)
136 iounmap(subdev->map.virt); 134 iounmap(subdev->map.virt);
135 kfree(subdev->name);
136 subdev->name = NULL;
137 release_mem_region(subdev->map.phys, subdev->map.size); 137 release_mem_region(subdev->map.phys, subdev->map.size);
138} 138}
139 139
@@ -177,16 +177,22 @@ static int armflash_probe(struct platform_device *dev)
177 177
178 if (nr == 1) 178 if (nr == 1)
179 /* No MTD concatenation, just use the default name */ 179 /* No MTD concatenation, just use the default name */
180 snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s", 180 subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL);
181 dev_name(&dev->dev));
182 else 181 else
183 snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d", 182 subdev->name = kasprintf(GFP_KERNEL, "%s-%d",
184 dev_name(&dev->dev), i); 183 dev_name(&dev->dev), i);
184 if (!subdev->name) {
185 err = -ENOMEM;
186 break;
187 }
185 subdev->plat = plat; 188 subdev->plat = plat;
186 189
187 err = armflash_subdev_probe(subdev, res); 190 err = armflash_subdev_probe(subdev, res);
188 if (err) 191 if (err) {
192 kfree(subdev->name);
193 subdev->name = NULL;
189 break; 194 break;
195 }
190 } 196 }
191 info->nr_subdev = i; 197 info->nr_subdev = i;
192 198
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index d5374cdcb163..e69de29bb2d1 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -1,250 +0,0 @@
1/*
2 * Handle mapping of the flash memory access routines on the SBC8240 board.
3 *
4 * Carolyn Smith, Tektronix, Inc.
5 *
6 * This code is GPLed
7 */
8
9/*
10 * The SBC8240 has 2 flash banks.
11 * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors.
12 * It contains the U-Boot code (7 sectors) and the environment (1 sector).
13 * Bank 1 is 4 x 1 MiB AMD AM29LV800BT; 15 x 64 KiB sectors, 1 x 32 KiB sector,
14 * 2 x 8 KiB sectors, 1 x 16 KiB sectors.
15 * Both parts are JEDEC compatible.
16 */
17
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <asm/io.h>
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/cfi.h>
26
27#ifdef CONFIG_MTD_PARTITIONS
28#include <linux/mtd/partitions.h>
29#endif
30
31#define DEBUG
32
33#ifdef DEBUG
34# define debugk(fmt,args...) printk(fmt ,##args)
35#else
36# define debugk(fmt,args...)
37#endif
38
39
40#define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */
41#define WINDOW_SIZE0 0x00080000
42#define BUSWIDTH0 1
43
44#define WINDOW_ADDR1 0xFF000000 /* 4 MiB */
45#define WINDOW_SIZE1 0x00400000
46#define BUSWIDTH1 8
47
48#define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */
49#define MTDID "sbc8240-%d" /* for mtdparts= partitioning */
50
51
52static struct map_info sbc8240_map[2] = {
53 {
54 .name = "sbc8240 Flash Bank #0",
55 .size = WINDOW_SIZE0,
56 .bankwidth = BUSWIDTH0,
57 },
58 {
59 .name = "sbc8240 Flash Bank #1",
60 .size = WINDOW_SIZE1,
61 .bankwidth = BUSWIDTH1,
62 }
63};
64
65#define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map)
66
67/*
68 * The following defines the partition layout of SBC8240 boards.
69 *
70 * See include/linux/mtd/partitions.h for definition of the
71 * mtd_partition structure.
72 *
73 * The *_max_flash_size is the maximum possible mapped flash size
74 * which is not necessarily the actual flash size. It must correspond
75 * to the value specified in the mapping definition defined by the
76 * "struct map_desc *_io_desc" for the corresponding machine.
77 */
78
79#ifdef CONFIG_MTD_PARTITIONS
80
81static struct mtd_partition sbc8240_uboot_partitions [] = {
82 /* Bank 0 */
83 {
84 .name = "U-boot", /* U-Boot Firmware */
85 .offset = 0,
86 .size = 0x00070000, /* 7 x 64 KiB sectors */
87 .mask_flags = MTD_WRITEABLE, /* force read-only */
88 },
89 {
90 .name = "environment", /* U-Boot environment */
91 .offset = 0x00070000,
92 .size = 0x00010000, /* 1 x 64 KiB sector */
93 },
94};
95
96static struct mtd_partition sbc8240_fs_partitions [] = {
97 {
98 .name = "jffs", /* JFFS filesystem */
99 .offset = 0,
100 .size = 0x003C0000, /* 4 * 15 * 64KiB */
101 },
102 {
103 .name = "tmp32",
104 .offset = 0x003C0000,
105 .size = 0x00020000, /* 4 * 32KiB */
106 },
107 {
108 .name = "tmp8a",
109 .offset = 0x003E0000,
110 .size = 0x00008000, /* 4 * 8KiB */
111 },
112 {
113 .name = "tmp8b",
114 .offset = 0x003E8000,
115 .size = 0x00008000, /* 4 * 8KiB */
116 },
117 {
118 .name = "tmp16",
119 .offset = 0x003F0000,
120 .size = 0x00010000, /* 4 * 16KiB */
121 }
122};
123
124/* trivial struct to describe partition information */
125struct mtd_part_def
126{
127 int nums;
128 unsigned char *type;
129 struct mtd_partition* mtd_part;
130};
131
132static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS];
133static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
134
135
136#endif /* CONFIG_MTD_PARTITIONS */
137
138
139static int __init init_sbc8240_mtd (void)
140{
141 static struct _cjs {
142 u_long addr;
143 u_long size;
144 } pt[NUM_FLASH_BANKS] = {
145 {
146 .addr = WINDOW_ADDR0,
147 .size = WINDOW_SIZE0
148 },
149 {
150 .addr = WINDOW_ADDR1,
151 .size = WINDOW_SIZE1
152 },
153 };
154
155 int devicesfound = 0;
156 int i,j;
157
158 for (i = 0; i < NUM_FLASH_BANKS; i++) {
159 printk (KERN_NOTICE MSG_PREFIX
160 "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr);
161
162 sbc8240_map[i].map_priv_1 =
163 (unsigned long) ioremap (pt[i].addr, pt[i].size);
164 if (!sbc8240_map[i].map_priv_1) {
165 printk (MSG_PREFIX "failed to ioremap\n");
166 for (j = 0; j < i; j++) {
167 iounmap((void *) sbc8240_map[j].map_priv_1);
168 sbc8240_map[j].map_priv_1 = 0;
169 }
170 return -EIO;
171 }
172 simple_map_init(&sbc8240_mtd[i]);
173
174 sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]);
175
176 if (sbc8240_mtd[i]) {
177 sbc8240_mtd[i]->module = THIS_MODULE;
178 devicesfound++;
179 } else {
180 if (sbc8240_map[i].map_priv_1) {
181 iounmap((void *) sbc8240_map[i].map_priv_1);
182 sbc8240_map[i].map_priv_1 = 0;
183 }
184 }
185 }
186
187 if (!devicesfound) {
188 printk(KERN_NOTICE MSG_PREFIX
189 "No suppported flash chips found!\n");
190 return -ENXIO;
191 }
192
193#ifdef CONFIG_MTD_PARTITIONS
194 sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
195 sbc8240_part_banks[0].type = "static image";
196 sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions);
197 sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
198 sbc8240_part_banks[1].type = "static file system";
199 sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions);
200
201 for (i = 0; i < NUM_FLASH_BANKS; i++) {
202
203 if (!sbc8240_mtd[i]) continue;
204 if (sbc8240_part_banks[i].nums == 0) {
205 printk (KERN_NOTICE MSG_PREFIX
206 "No partition info available, registering whole device\n");
207 add_mtd_device(sbc8240_mtd[i]);
208 } else {
209 printk (KERN_NOTICE MSG_PREFIX
210 "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name);
211 add_mtd_partitions (sbc8240_mtd[i],
212 sbc8240_part_banks[i].mtd_part,
213 sbc8240_part_banks[i].nums);
214 }
215 }
216#else
217 printk(KERN_NOTICE MSG_PREFIX
218 "Registering %d flash banks at once\n", devicesfound);
219
220 for (i = 0; i < devicesfound; i++) {
221 add_mtd_device(sbc8240_mtd[i]);
222 }
223#endif /* CONFIG_MTD_PARTITIONS */
224
225 return devicesfound == 0 ? -ENXIO : 0;
226}
227
228static void __exit cleanup_sbc8240_mtd (void)
229{
230 int i;
231
232 for (i = 0; i < NUM_FLASH_BANKS; i++) {
233 if (sbc8240_mtd[i]) {
234 del_mtd_device (sbc8240_mtd[i]);
235 map_destroy (sbc8240_mtd[i]);
236 }
237 if (sbc8240_map[i].map_priv_1) {
238 iounmap ((void *) sbc8240_map[i].map_priv_1);
239 sbc8240_map[i].map_priv_1 = 0;
240 }
241 }
242}
243
244module_init (init_sbc8240_mtd);
245module_exit (cleanup_sbc8240_mtd);
246
247MODULE_LICENSE ("GPL");
248MODULE_AUTHOR ("Carolyn Smith <carolyn.smith@tektronix.com>");
249MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards");
250
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index c3f62654b6df..7baba40c1ed2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -144,7 +144,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
144 struct mtd_blktrans_ops *tr = dev->tr; 144 struct mtd_blktrans_ops *tr = dev->tr;
145 int ret = -ENODEV; 145 int ret = -ENODEV;
146 146
147 if (!try_module_get(dev->mtd->owner)) 147 if (!get_mtd_device(NULL, dev->mtd->index))
148 goto out; 148 goto out;
149 149
150 if (!try_module_get(tr->owner)) 150 if (!try_module_get(tr->owner))
@@ -158,7 +158,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
158 ret = 0; 158 ret = 0;
159 if (tr->open && (ret = tr->open(dev))) { 159 if (tr->open && (ret = tr->open(dev))) {
160 dev->mtd->usecount--; 160 dev->mtd->usecount--;
161 module_put(dev->mtd->owner); 161 put_mtd_device(dev->mtd);
162 out_tr: 162 out_tr:
163 module_put(tr->owner); 163 module_put(tr->owner);
164 } 164 }
@@ -177,7 +177,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
177 177
178 if (!ret) { 178 if (!ret) {
179 dev->mtd->usecount--; 179 dev->mtd->usecount--;
180 module_put(dev->mtd->owner); 180 put_mtd_device(dev->mtd);
181 module_put(tr->owner); 181 module_put(tr->owner);
182 } 182 }
183 183
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 208c6faa0358..77db5ce24d92 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -29,6 +29,8 @@ static struct mtdblk_dev {
29 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 29 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
30} *mtdblks[MAX_MTD_DEVICES]; 30} *mtdblks[MAX_MTD_DEVICES];
31 31
32static struct mutex mtdblks_lock;
33
32/* 34/*
33 * Cache stuff... 35 * Cache stuff...
34 * 36 *
@@ -270,15 +272,19 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
270 272
271 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); 273 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
272 274
275 mutex_lock(&mtdblks_lock);
273 if (mtdblks[dev]) { 276 if (mtdblks[dev]) {
274 mtdblks[dev]->count++; 277 mtdblks[dev]->count++;
278 mutex_unlock(&mtdblks_lock);
275 return 0; 279 return 0;
276 } 280 }
277 281
278 /* OK, it's not open. Create cache info for it */ 282 /* OK, it's not open. Create cache info for it */
279 mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); 283 mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
280 if (!mtdblk) 284 if (!mtdblk) {
285 mutex_unlock(&mtdblks_lock);
281 return -ENOMEM; 286 return -ENOMEM;
287 }
282 288
283 mtdblk->count = 1; 289 mtdblk->count = 1;
284 mtdblk->mtd = mtd; 290 mtdblk->mtd = mtd;
@@ -291,6 +297,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
291 } 297 }
292 298
293 mtdblks[dev] = mtdblk; 299 mtdblks[dev] = mtdblk;
300 mutex_unlock(&mtdblks_lock);
294 301
295 DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); 302 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
296 303
@@ -304,6 +311,8 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
304 311
305 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); 312 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
306 313
314 mutex_lock(&mtdblks_lock);
315
307 mutex_lock(&mtdblk->cache_mutex); 316 mutex_lock(&mtdblk->cache_mutex);
308 write_cached_data(mtdblk); 317 write_cached_data(mtdblk);
309 mutex_unlock(&mtdblk->cache_mutex); 318 mutex_unlock(&mtdblk->cache_mutex);
@@ -316,6 +325,9 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
316 vfree(mtdblk->cache_data); 325 vfree(mtdblk->cache_data);
317 kfree(mtdblk); 326 kfree(mtdblk);
318 } 327 }
328
329 mutex_unlock(&mtdblks_lock);
330
319 DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); 331 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
320 332
321 return 0; 333 return 0;
@@ -376,6 +388,8 @@ static struct mtd_blktrans_ops mtdblock_tr = {
376 388
377static int __init init_mtdblock(void) 389static int __init init_mtdblock(void)
378{ 390{
391 mutex_init(&mtdblks_lock);
392
379 return register_mtd_blktrans(&mtdblock_tr); 393 return register_mtd_blktrans(&mtdblock_tr);
380} 394}
381 395
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index fac54a3fa3f1..00ebf7af7467 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -65,8 +65,8 @@ static void mtd_release(struct device *dev)
65static int mtd_cls_suspend(struct device *dev, pm_message_t state) 65static int mtd_cls_suspend(struct device *dev, pm_message_t state)
66{ 66{
67 struct mtd_info *mtd = dev_to_mtd(dev); 67 struct mtd_info *mtd = dev_to_mtd(dev);
68 68
69 if (mtd->suspend) 69 if (mtd && mtd->suspend)
70 return mtd->suspend(mtd); 70 return mtd->suspend(mtd);
71 else 71 else
72 return 0; 72 return 0;
@@ -76,7 +76,7 @@ static int mtd_cls_resume(struct device *dev)
76{ 76{
77 struct mtd_info *mtd = dev_to_mtd(dev); 77 struct mtd_info *mtd = dev_to_mtd(dev);
78 78
79 if (mtd->resume) 79 if (mtd && mtd->resume)
80 mtd->resume(mtd); 80 mtd->resume(mtd);
81 return 0; 81 return 0;
82} 82}
@@ -298,6 +298,7 @@ int add_mtd_device(struct mtd_info *mtd)
298 mtd->dev.class = &mtd_class; 298 mtd->dev.class = &mtd_class;
299 mtd->dev.devt = MTD_DEVT(i); 299 mtd->dev.devt = MTD_DEVT(i);
300 dev_set_name(&mtd->dev, "mtd%d", i); 300 dev_set_name(&mtd->dev, "mtd%d", i);
301 dev_set_drvdata(&mtd->dev, mtd);
301 if (device_register(&mtd->dev) != 0) { 302 if (device_register(&mtd->dev) != 0) {
302 mtd_table[i] = NULL; 303 mtd_table[i] = NULL;
303 break; 304 break;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2802992b39da..20c828ba9405 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
534 &num_partitions); 534 &num_partitions);
535 535
536 if ((!partitions) || (num_partitions == 0)) { 536 if ((!partitions) || (num_partitions == 0)) {
537 printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n"); 537 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
538 res = ENXIO; 538 res = ENXIO;
539 goto err_no_partitions; 539 goto err_no_partitions;
540 } 540 }
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0cd76f89f4b0..ebd07e95b814 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,8 @@
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/jiffies.h>
15#include <linux/sched.h>
14#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
15#include <linux/mtd/nand.h> 17#include <linux/mtd/nand.h>
16#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
@@ -541,7 +543,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
541 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 543 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
542 mtd); 544 mtd);
543 unsigned long timeo = jiffies; 545 unsigned long timeo = jiffies;
544 int status, state = this->state; 546 int status = NAND_STATUS_FAIL, state = this->state;
545 547
546 if (state == FL_ERASING) 548 if (state == FL_ERASING)
547 timeo += (HZ * 400) / 1000; 549 timeo += (HZ * 400) / 1000;
@@ -556,8 +558,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
556 558
557 while (time_before(jiffies, timeo)) { 559 while (time_before(jiffies, timeo)) {
558 status = __raw_readb(this->IO_ADDR_R); 560 status = __raw_readb(this->IO_ADDR_R);
559 if (!(status & 0x40)) 561 if (status & NAND_STATUS_READY)
560 break; 562 break;
563 cond_resched();
561 } 564 }
562 return status; 565 return status;
563} 566}
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index e3f8495a94c2..fb86cacd5bdb 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -208,7 +208,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
208 /* Normally, we force a fold to happen before we run out of free blocks completely */ 208 /* Normally, we force a fold to happen before we run out of free blocks completely */
209 if (!desperate && nftl->numfreeEUNs < 2) { 209 if (!desperate && nftl->numfreeEUNs < 2) {
210 DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); 210 DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
211 return 0xffff; 211 return BLOCK_NIL;
212 } 212 }
213 213
214 /* Scan for a free block */ 214 /* Scan for a free block */
@@ -230,11 +230,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
230 printk("Argh! No free blocks found! LastFreeEUN = %d, " 230 printk("Argh! No free blocks found! LastFreeEUN = %d, "
231 "FirstEUN = %d\n", nftl->LastFreeEUN, 231 "FirstEUN = %d\n", nftl->LastFreeEUN,
232 le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN)); 232 le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
233 return 0xffff; 233 return BLOCK_NIL;
234 } 234 }
235 } while (pot != nftl->LastFreeEUN); 235 } while (pot != nftl->LastFreeEUN);
236 236
237 return 0xffff; 237 return BLOCK_NIL;
238} 238}
239 239
240static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock ) 240static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
@@ -431,7 +431,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
431 431
432 /* add the header so that it is now a valid chain */ 432 /* add the header so that it is now a valid chain */
433 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); 433 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
434 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff; 434 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
435 435
436 nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8, 436 nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
437 8, &retlen, (char *)&oob.u); 437 8, &retlen, (char *)&oob.u);
@@ -515,7 +515,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
515 if (ChainLength < 2) { 515 if (ChainLength < 2) {
516 printk(KERN_WARNING "No Virtual Unit Chains available for folding. " 516 printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
517 "Failing request\n"); 517 "Failing request\n");
518 return 0xffff; 518 return BLOCK_NIL;
519 } 519 }
520 520
521 return NFTL_foldchain (nftl, LongestChain, pendingblock); 521 return NFTL_foldchain (nftl, LongestChain, pendingblock);
@@ -578,7 +578,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
578 printk(KERN_WARNING 578 printk(KERN_WARNING
579 "Infinite loop in Virtual Unit Chain 0x%x\n", 579 "Infinite loop in Virtual Unit Chain 0x%x\n",
580 thisVUC); 580 thisVUC);
581 return 0xffff; 581 return BLOCK_NIL;
582 } 582 }
583 583
584 /* Skip to next block in chain */ 584 /* Skip to next block in chain */
@@ -601,7 +601,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
601 //u16 startEUN = nftl->EUNtable[thisVUC]; 601 //u16 startEUN = nftl->EUNtable[thisVUC];
602 602
603 //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC); 603 //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
604 writeEUN = NFTL_makefreeblock(nftl, 0xffff); 604 writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
605 605
606 if (writeEUN == BLOCK_NIL) { 606 if (writeEUN == BLOCK_NIL) {
607 /* OK, we accept that the above comment is 607 /* OK, we accept that the above comment is
@@ -673,7 +673,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
673 673
674 printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n", 674 printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
675 thisVUC); 675 thisVUC);
676 return 0xffff; 676 return BLOCK_NIL;
677} 677}
678 678
679static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, 679static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 38d656b9b2ee..0108ed42e877 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -266,7 +266,7 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
266 266
267 if (ONENAND_CURRENT_BUFFERRAM(this)) { 267 if (ONENAND_CURRENT_BUFFERRAM(this)) {
268 if (area == ONENAND_DATARAM) 268 if (area == ONENAND_DATARAM)
269 return mtd->writesize; 269 return this->writesize;
270 if (area == ONENAND_SPARERAM) 270 if (area == ONENAND_SPARERAM)
271 return mtd->oobsize; 271 return mtd->oobsize;
272 } 272 }
@@ -770,6 +770,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
770 } 770 }
771 iounmap(c->onenand.base); 771 iounmap(c->onenand.base);
772 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 772 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
773 gpmc_cs_free(c->gpmc_cs);
773 kfree(c); 774 kfree(c);
774 775
775 return 0; 776 return 0;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 286ed594e5a0..e1f7d0a78b9d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -657,6 +657,11 @@ static int io_init(struct ubi_device *ubi)
657 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) 657 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
658 ubi->bad_allowed = 1; 658 ubi->bad_allowed = 1;
659 659
660 if (ubi->mtd->type == MTD_NORFLASH) {
661 ubi_assert(ubi->mtd->writesize == 1);
662 ubi->nor_flash = 1;
663 }
664
660 ubi->min_io_size = ubi->mtd->writesize; 665 ubi->min_io_size = ubi->mtd->writesize;
661 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; 666 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
662 667
@@ -996,6 +1001,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
996 ubi_msg("number of PEBs reserved for bad PEB handling: %d", 1001 ubi_msg("number of PEBs reserved for bad PEB handling: %d",
997 ubi->beb_rsvd_pebs); 1002 ubi->beb_rsvd_pebs);
998 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); 1003 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
1004 ubi_msg("image sequence number: %d", ubi->image_seq);
999 1005
1000 /* 1006 /*
1001 * The below lock makes sure we do not race with 'ubi_thread()' which 1007 * The below lock makes sure we do not race with 'ubi_thread()' which
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c0ed60e8ade9..54b0186915fb 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -44,6 +44,8 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
44 be32_to_cpu(ec_hdr->vid_hdr_offset)); 44 be32_to_cpu(ec_hdr->vid_hdr_offset));
45 printk(KERN_DEBUG "\tdata_offset %d\n", 45 printk(KERN_DEBUG "\tdata_offset %d\n",
46 be32_to_cpu(ec_hdr->data_offset)); 46 be32_to_cpu(ec_hdr->data_offset));
47 printk(KERN_DEBUG "\timage_seq %d\n",
48 be32_to_cpu(ec_hdr->image_seq));
47 printk(KERN_DEBUG "\thdr_crc %#08x\n", 49 printk(KERN_DEBUG "\thdr_crc %#08x\n",
48 be32_to_cpu(ec_hdr->hdr_crc)); 50 be32_to_cpu(ec_hdr->hdr_crc));
49 printk(KERN_DEBUG "erase counter header hexdump:\n"); 51 printk(KERN_DEBUG "erase counter header hexdump:\n");
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 13777e5beac9..a4da7a09b949 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -93,6 +93,12 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
93#define UBI_IO_DEBUG 0 93#define UBI_IO_DEBUG 0
94#endif 94#endif
95 95
96#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
97int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
98#else
99#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
100#endif
101
96#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT 102#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
97#define DBG_DISABLE_BGT 1 103#define DBG_DISABLE_BGT 1
98#else 104#else
@@ -167,6 +173,7 @@ static inline int ubi_dbg_is_erase_failure(void)
167#define ubi_dbg_is_bitflip() 0 173#define ubi_dbg_is_bitflip() 0
168#define ubi_dbg_is_write_failure() 0 174#define ubi_dbg_is_write_failure() 0
169#define ubi_dbg_is_erase_failure() 0 175#define ubi_dbg_is_erase_failure() 0
176#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
170 177
171#endif /* !CONFIG_MTD_UBI_DEBUG */ 178#endif /* !CONFIG_MTD_UBI_DEBUG */
172#endif /* !__UBI_DEBUG_H__ */ 179#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 95aaac03f938..b5e478fa2661 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -332,6 +332,7 @@ static int gluebi_create(struct ubi_device_info *di,
332 } 332 }
333 333
334 gluebi->vol_id = vi->vol_id; 334 gluebi->vol_id = vi->vol_id;
335 gluebi->ubi_num = vi->ubi_num;
335 mtd->type = MTD_UBIVOLUME; 336 mtd->type = MTD_UBIVOLUME;
336 if (!di->ro_mode) 337 if (!di->ro_mode)
337 mtd->flags = MTD_WRITEABLE; 338 mtd->flags = MTD_WRITEABLE;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index effaff28bab1..4cb69925d8d9 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -98,17 +98,12 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
98static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); 98static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
99static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, 99static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
100 const struct ubi_vid_hdr *vid_hdr); 100 const struct ubi_vid_hdr *vid_hdr);
101static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
102 int len);
103static int paranoid_check_empty(struct ubi_device *ubi, int pnum);
104#else 101#else
105#define paranoid_check_not_bad(ubi, pnum) 0 102#define paranoid_check_not_bad(ubi, pnum) 0
106#define paranoid_check_peb_ec_hdr(ubi, pnum) 0 103#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
107#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0 104#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
108#define paranoid_check_peb_vid_hdr(ubi, pnum) 0 105#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
109#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 106#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
110#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
111#define paranoid_check_empty(ubi, pnum) 0
112#endif 107#endif
113 108
114/** 109/**
@@ -244,7 +239,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
244 return err > 0 ? -EINVAL : err; 239 return err > 0 ? -EINVAL : err;
245 240
246 /* The area we are writing to has to contain all 0xFF bytes */ 241 /* The area we are writing to has to contain all 0xFF bytes */
247 err = paranoid_check_all_ff(ubi, pnum, offset, len); 242 err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
248 if (err) 243 if (err)
249 return err > 0 ? -EINVAL : err; 244 return err > 0 ? -EINVAL : err;
250 245
@@ -271,8 +266,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
271 addr = (loff_t)pnum * ubi->peb_size + offset; 266 addr = (loff_t)pnum * ubi->peb_size + offset;
272 err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf); 267 err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
273 if (err) { 268 if (err) {
274 ubi_err("error %d while writing %d bytes to PEB %d:%d, written" 269 ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
275 " %zd bytes", err, len, pnum, offset, written); 270 "%zd bytes", err, len, pnum, offset, written);
276 ubi_dbg_dump_stack(); 271 ubi_dbg_dump_stack();
277 } else 272 } else
278 ubi_assert(written == len); 273 ubi_assert(written == len);
@@ -350,7 +345,7 @@ retry:
350 return -EIO; 345 return -EIO;
351 } 346 }
352 347
353 err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size); 348 err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
354 if (err) 349 if (err)
355 return err > 0 ? -EINVAL : err; 350 return err > 0 ? -EINVAL : err;
356 351
@@ -459,6 +454,54 @@ out:
459} 454}
460 455
461/** 456/**
457 * nor_erase_prepare - prepare a NOR flash PEB for erasure.
458 * @ubi: UBI device description object
459 * @pnum: physical eraseblock number to prepare
460 *
461 * NOR flash, or at least some of them, have peculiar embedded PEB erasure
462 * algorithm: the PEB is first filled with zeroes, then it is erased. And
463 * filling with zeroes starts from the end of the PEB. This was observed with
464 * Spansion S29GL512N NOR flash.
465 *
466 * This means that in case of a power cut we may end up with intact data at the
467 * beginning of the PEB, and all zeroes at the end of PEB. In other words, the
468 * EC and VID headers are OK, but a large chunk of data at the end of PEB is
469 * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
470 * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
471 *
472 * This function is called before erasing NOR PEBs and it zeroes out EC and VID
473 * magic numbers in order to invalidate them and prevent the failures. Returns
474 * zero in case of success and a negative error code in case of failure.
475 */
476static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
477{
478 int err;
479 size_t written;
480 loff_t addr;
481 uint32_t data = 0;
482
483 addr = (loff_t)pnum * ubi->peb_size;
484 err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
485 if (err) {
486 ubi_err("error %d while writing 4 bytes to PEB %d:%d, written "
487 "%zd bytes", err, pnum, 0, written);
488 ubi_dbg_dump_stack();
489 return err;
490 }
491
492 addr += ubi->vid_hdr_aloffset;
493 err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
494 if (err) {
495 ubi_err("error %d while writing 4 bytes to PEB %d:%d, written "
496 "%zd bytes", err, pnum, ubi->vid_hdr_aloffset, written);
497 ubi_dbg_dump_stack();
498 return err;
499 }
500
501 return 0;
502}
503
504/**
462 * ubi_io_sync_erase - synchronously erase a physical eraseblock. 505 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
463 * @ubi: UBI device description object 506 * @ubi: UBI device description object
464 * @pnum: physical eraseblock number to erase 507 * @pnum: physical eraseblock number to erase
@@ -489,6 +532,12 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
489 return -EROFS; 532 return -EROFS;
490 } 533 }
491 534
535 if (ubi->nor_flash) {
536 err = nor_erase_prepare(ubi, pnum);
537 if (err)
538 return err;
539 }
540
492 if (torture) { 541 if (torture) {
493 ret = torture_peb(ubi, pnum); 542 ret = torture_peb(ubi, pnum);
494 if (ret < 0) 543 if (ret < 0)
@@ -672,11 +721,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
672 if (read_err != -EBADMSG && 721 if (read_err != -EBADMSG &&
673 check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { 722 check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
674 /* The physical eraseblock is supposedly empty */ 723 /* The physical eraseblock is supposedly empty */
675 err = paranoid_check_all_ff(ubi, pnum, 0,
676 ubi->peb_size);
677 if (err)
678 return err > 0 ? UBI_IO_BAD_EC_HDR : err;
679
680 if (verbose) 724 if (verbose)
681 ubi_warn("no EC header found at PEB %d, " 725 ubi_warn("no EC header found at PEB %d, "
682 "only 0xFF bytes", pnum); 726 "only 0xFF bytes", pnum);
@@ -752,6 +796,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
752 ec_hdr->version = UBI_VERSION; 796 ec_hdr->version = UBI_VERSION;
753 ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset); 797 ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
754 ec_hdr->data_offset = cpu_to_be32(ubi->leb_start); 798 ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
799 ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
755 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); 800 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
756 ec_hdr->hdr_crc = cpu_to_be32(crc); 801 ec_hdr->hdr_crc = cpu_to_be32(crc);
757 802
@@ -947,15 +992,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
947 if (read_err != -EBADMSG && 992 if (read_err != -EBADMSG &&
948 check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { 993 check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
949 /* The physical eraseblock is supposedly free */ 994 /* The physical eraseblock is supposedly free */
950
951 /*
952 * The below is just a paranoid check, it has to be
953 * compiled out if paranoid checks are disabled.
954 */
955 err = paranoid_check_empty(ubi, pnum);
956 if (err)
957 return err > 0 ? UBI_IO_BAD_VID_HDR : err;
958
959 if (verbose) 995 if (verbose)
960 ubi_warn("no VID header found at PEB %d, " 996 ubi_warn("no VID header found at PEB %d, "
961 "only 0xFF bytes", pnum); 997 "only 0xFF bytes", pnum);
@@ -1229,7 +1265,7 @@ exit:
1229} 1265}
1230 1266
1231/** 1267/**
1232 * paranoid_check_all_ff - check that a region of flash is empty. 1268 * ubi_dbg_check_all_ff - check that a region of flash is empty.
1233 * @ubi: UBI device description object 1269 * @ubi: UBI device description object
1234 * @pnum: the physical eraseblock number to check 1270 * @pnum: the physical eraseblock number to check
1235 * @offset: the starting offset within the physical eraseblock to check 1271 * @offset: the starting offset within the physical eraseblock to check
@@ -1239,8 +1275,7 @@ exit:
1239 * @offset of the physical eraseblock @pnum, %1 if not, and a negative error 1275 * @offset of the physical eraseblock @pnum, %1 if not, and a negative error
1240 * code if an error occurred. 1276 * code if an error occurred.
1241 */ 1277 */
1242static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, 1278int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1243 int len)
1244{ 1279{
1245 size_t read; 1280 size_t read;
1246 int err; 1281 int err;
@@ -1276,74 +1311,4 @@ error:
1276 return err; 1311 return err;
1277} 1312}
1278 1313
1279/**
1280 * paranoid_check_empty - whether a PEB is empty.
1281 * @ubi: UBI device description object
1282 * @pnum: the physical eraseblock number to check
1283 *
1284 * This function makes sure PEB @pnum is empty, which means it contains only
1285 * %0xFF data bytes. Returns zero if the PEB is empty, %1 if not, and a
1286 * negative error code in case of failure.
1287 *
1288 * Empty PEBs have the EC header, and do not have the VID header. The caller of
1289 * this function should have already made sure the PEB does not have the VID
1290 * header. However, this function re-checks that, because it is possible that
1291 * the header and data has already been written to the PEB.
1292 *
1293 * Let's consider a possible scenario. Suppose there are 2 tasks - A and B.
1294 * Task A is in 'wear_leveling_worker()'. It is reading VID header of PEB X to
1295 * find which LEB it corresponds to. PEB X is currently unmapped, and has no
1296 * VID header. Task B is trying to write to PEB X.
1297 *
1298 * Task A: in 'ubi_io_read_vid_hdr()': reads the VID header from PEB X. The
1299 * read data contain all 0xFF bytes;
1300 * Task B: writes VID header and some data to PEB X;
1301 * Task A: assumes PEB X is empty, calls 'paranoid_check_empty()'. And if we
1302 * do not re-read the VID header, and do not cancel the checking if it
1303 * is there, we fail.
1304 */
1305static int paranoid_check_empty(struct ubi_device *ubi, int pnum)
1306{
1307 int err, offs = ubi->vid_hdr_aloffset, len = ubi->vid_hdr_alsize;
1308 size_t read;
1309 uint32_t magic;
1310 const struct ubi_vid_hdr *vid_hdr;
1311
1312 mutex_lock(&ubi->dbg_buf_mutex);
1313 err = ubi->mtd->read(ubi->mtd, offs, len, &read, ubi->dbg_peb_buf);
1314 if (err && err != -EUCLEAN) {
1315 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
1316 "read %zd bytes", err, len, pnum, offs, read);
1317 goto error;
1318 }
1319
1320 vid_hdr = ubi->dbg_peb_buf;
1321 magic = be32_to_cpu(vid_hdr->magic);
1322 if (magic == UBI_VID_HDR_MAGIC)
1323 /* The PEB contains VID header, so it is not empty */
1324 goto out;
1325
1326 err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
1327 if (err == 0) {
1328 ubi_err("flash region at PEB %d:%d, length %d does not "
1329 "contain all 0xFF bytes", pnum, offs, len);
1330 goto fail;
1331 }
1332
1333out:
1334 mutex_unlock(&ubi->dbg_buf_mutex);
1335 return 0;
1336
1337fail:
1338 ubi_err("paranoid check failed for PEB %d", pnum);
1339 ubi_msg("hex dump of the %d-%d region", offs, offs + len);
1340 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1341 ubi->dbg_peb_buf, len, 1);
1342 err = 1;
1343error:
1344 ubi_dbg_dump_stack();
1345 mutex_unlock(&ubi->dbg_buf_mutex);
1346 return err;
1347}
1348
1349#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ 1314#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c3d653ba5ca0..a423131b6171 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -757,6 +757,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
757 si->is_empty = 0; 757 si->is_empty = 0;
758 758
759 if (!ec_corr) { 759 if (!ec_corr) {
760 int image_seq;
761
760 /* Make sure UBI version is OK */ 762 /* Make sure UBI version is OK */
761 if (ech->version != UBI_VERSION) { 763 if (ech->version != UBI_VERSION) {
762 ubi_err("this UBI version is %d, image version is %d", 764 ubi_err("this UBI version is %d, image version is %d",
@@ -778,6 +780,18 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
778 ubi_dbg_dump_ec_hdr(ech); 780 ubi_dbg_dump_ec_hdr(ech);
779 return -EINVAL; 781 return -EINVAL;
780 } 782 }
783
784 image_seq = be32_to_cpu(ech->image_seq);
785 if (!si->image_seq_set) {
786 ubi->image_seq = image_seq;
787 si->image_seq_set = 1;
788 } else if (ubi->image_seq != image_seq) {
789 ubi_err("bad image sequence number %d in PEB %d, "
790 "expected %d", image_seq, pnum, ubi->image_seq);
791 ubi_dbg_dump_ec_hdr(ech);
792 return -EINVAL;
793 }
794
781 } 795 }
782 796
783 /* OK, we've done with the EC header, let's look at the VID header */ 797 /* OK, we've done with the EC header, let's look at the VID header */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 61df208e2f20..1017cf12def5 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -102,6 +102,7 @@ struct ubi_scan_volume {
102 * @mean_ec: mean erase counter value 102 * @mean_ec: mean erase counter value
103 * @ec_sum: a temporary variable used when calculating @mean_ec 103 * @ec_sum: a temporary variable used when calculating @mean_ec
104 * @ec_count: a temporary variable used when calculating @mean_ec 104 * @ec_count: a temporary variable used when calculating @mean_ec
105 * @image_seq_set: indicates @ubi->image_seq is known
105 * 106 *
106 * This data structure contains the result of scanning and may be used by other 107 * This data structure contains the result of scanning and may be used by other
107 * UBI sub-systems to build final UBI data structures, further error-recovery 108 * UBI sub-systems to build final UBI data structures, further error-recovery
@@ -124,6 +125,7 @@ struct ubi_scan_info {
124 int mean_ec; 125 int mean_ec;
125 uint64_t ec_sum; 126 uint64_t ec_sum;
126 int ec_count; 127 int ec_count;
128 int image_seq_set;
127}; 129};
128 130
129struct ubi_device; 131struct ubi_device;
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 8419fdccc79c..503ea9b27309 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -129,6 +129,7 @@ enum {
129 * @ec: the erase counter 129 * @ec: the erase counter
130 * @vid_hdr_offset: where the VID header starts 130 * @vid_hdr_offset: where the VID header starts
131 * @data_offset: where the user data start 131 * @data_offset: where the user data start
132 * @image_seq: image sequence number
132 * @padding2: reserved for future, zeroes 133 * @padding2: reserved for future, zeroes
133 * @hdr_crc: erase counter header CRC checksum 134 * @hdr_crc: erase counter header CRC checksum
134 * 135 *
@@ -144,6 +145,14 @@ enum {
144 * volume identifier header and user data, relative to the beginning of the 145 * volume identifier header and user data, relative to the beginning of the
145 * physical eraseblock. These values have to be the same for all physical 146 * physical eraseblock. These values have to be the same for all physical
146 * eraseblocks. 147 * eraseblocks.
148 *
149 * The @image_seq field is used to validate a UBI image that has been prepared
150 * for a UBI device. The @image_seq value can be any value, but it must be the
151 * same on all eraseblocks. UBI will ensure that all new erase counter headers
152 * also contain this value, and will check the value when scanning at start-up.
153 * One way to make use of @image_seq is to increase its value by one every time
154 * an image is flashed over an existing image, then, if the flashing does not
155 * complete, UBI will detect the error when scanning.
147 */ 156 */
148struct ubi_ec_hdr { 157struct ubi_ec_hdr {
149 __be32 magic; 158 __be32 magic;
@@ -152,7 +161,8 @@ struct ubi_ec_hdr {
152 __be64 ec; /* Warning: the current limit is 31-bit anyway! */ 161 __be64 ec; /* Warning: the current limit is 31-bit anyway! */
153 __be32 vid_hdr_offset; 162 __be32 vid_hdr_offset;
154 __be32 data_offset; 163 __be32 data_offset;
155 __u8 padding2[36]; 164 __be32 image_seq;
165 __u8 padding2[32];
156 __be32 hdr_crc; 166 __be32 hdr_crc;
157} __attribute__ ((packed)); 167} __attribute__ ((packed));
158 168
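The new @image_seq documentation above describes how a flashing workflow can use the field: stamp the same value into every erase-counter header of an image and bump it each time a new image overwrites an old one, so an interrupted flash leaves PEBs with mismatching sequence numbers that the attach-time scan (see the scan.c hunk earlier in this patch) now rejects. A minimal host-side sketch of that stamping step follows; it is hypothetical tooling, not part of UBI, and a real tool (e.g. ubinize) would also have to recompute hdr_crc afterwards, which is omitted here:

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>	/* htonl() for the big-endian on-media field */

/* Stand-in for the on-media EC header layout after this patch: image_seq
 * replaces the first four bytes of the former 36-byte padding2. */
struct ec_hdr_image {
	uint32_t magic;
	uint8_t  version;
	uint8_t  padding1[3];
	uint64_t ec;
	uint32_t vid_hdr_offset;
	uint32_t data_offset;
	uint32_t image_seq;
	uint8_t  padding2[32];
	uint32_t hdr_crc;
} __attribute__((packed));

/* Hypothetical image-tool helper: write the chosen sequence number into the
 * EC header at the start of every PEB-sized chunk of the image buffer. */
static void stamp_image_seq(void *image, size_t image_size,
			    size_t peb_size, uint32_t seq)
{
	size_t off;

	for (off = 0; off + sizeof(struct ec_hdr_image) <= image_size;
	     off += peb_size) {
		struct ec_hdr_image *hdr = (void *)((char *)image + off);

		hdr->image_seq = htonl(seq);	/* stored as __be32 on media */
	}
}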
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 28acd133c997..6a5fe9633783 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -301,6 +301,7 @@ struct ubi_wl_entry;
301 * @vol->readers, @vol->writers, @vol->exclusive, 301 * @vol->readers, @vol->writers, @vol->exclusive,
302 * @vol->ref_count, @vol->mapping and @vol->eba_tbl. 302 * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
303 * @ref_count: count of references on the UBI device 303 * @ref_count: count of references on the UBI device
304 * @image_seq: image sequence number recorded on EC headers
304 * 305 *
305 * @rsvd_pebs: count of reserved physical eraseblocks 306 * @rsvd_pebs: count of reserved physical eraseblocks
306 * @avail_pebs: count of available physical eraseblocks 307 * @avail_pebs: count of available physical eraseblocks
@@ -372,6 +373,7 @@ struct ubi_wl_entry;
372 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 373 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
373 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or 374 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
374 * not 375 * not
376 * @nor_flash: non-zero if working on top of NOR flash
375 * @mtd: MTD device descriptor 377 * @mtd: MTD device descriptor
376 * 378 *
377 * @peb_buf1: a buffer of PEB size used for different purposes 379 * @peb_buf1: a buffer of PEB size used for different purposes
@@ -390,6 +392,7 @@ struct ubi_device {
390 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; 392 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
391 spinlock_t volumes_lock; 393 spinlock_t volumes_lock;
392 int ref_count; 394 int ref_count;
395 int image_seq;
393 396
394 int rsvd_pebs; 397 int rsvd_pebs;
395 int avail_pebs; 398 int avail_pebs;
@@ -452,7 +455,8 @@ struct ubi_device {
452 int vid_hdr_offset; 455 int vid_hdr_offset;
453 int vid_hdr_aloffset; 456 int vid_hdr_aloffset;
454 int vid_hdr_shift; 457 int vid_hdr_shift;
455 int bad_allowed; 458 unsigned int bad_allowed:1;
459 unsigned int nor_flash:1;
456 struct mtd_info *mtd; 460 struct mtd_info *mtd;
457 461
458 void *peb_buf1; 462 void *peb_buf1;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2b2472300610..600c7229d5cf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -459,6 +459,14 @@ retry:
459 dbg_wl("PEB %d EC %d", e->pnum, e->ec); 459 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
460 prot_queue_add(ubi, e); 460 prot_queue_add(ubi, e);
461 spin_unlock(&ubi->wl_lock); 461 spin_unlock(&ubi->wl_lock);
462
463 err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
464 ubi->peb_size - ubi->vid_hdr_aloffset);
465 if (err) {
466 ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
467 return err > 0 ? -EINVAL : err;
468 }
469
462 return e->pnum; 470 return e->pnum;
463} 471}
464 472
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 3e00fa8ea65f..4a7c32895be5 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -832,7 +832,9 @@ static int corkscrew_open(struct net_device *dev)
832 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 832 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
833 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); 833 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
834 } 834 }
835 vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ 835 if (i != 0)
836 vp->rx_ring[i - 1].next =
837 isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
836 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); 838 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
837 } 839 }
838 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ 840 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c34aee91250b..c20416850948 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2721,13 +2721,15 @@ dump_tx_ring(struct net_device *dev)
2721 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); 2721 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2722 issue_and_wait(dev, DownStall); 2722 issue_and_wait(dev, DownStall);
2723 for (i = 0; i < TX_RING_SIZE; i++) { 2723 for (i = 0; i < TX_RING_SIZE; i++) {
2724 pr_err(" %d: @%p length %8.8x status %8.8x\n", i, 2724 unsigned int length;
2725 &vp->tx_ring[i], 2725
2726#if DO_ZEROCOPY 2726#if DO_ZEROCOPY
2727 le32_to_cpu(vp->tx_ring[i].frag[0].length), 2727 length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
2728#else 2728#else
2729 le32_to_cpu(vp->tx_ring[i].length), 2729 length = le32_to_cpu(vp->tx_ring[i].length);
2730#endif 2730#endif
2731 pr_err(" %d: @%p length %8.8x status %8.8x\n",
2732 i, &vp->tx_ring[i], length,
2731 le32_to_cpu(vp->tx_ring[i].status)); 2733 le32_to_cpu(vp->tx_ring[i].status));
2732 } 2734 }
2733 if (!stalled) 2735 if (!stalled)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 8ae72ec14456..0e2ba21d4441 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -908,6 +908,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
908 .ndo_open = rtl8139_open, 908 .ndo_open = rtl8139_open,
909 .ndo_stop = rtl8139_close, 909 .ndo_stop = rtl8139_close,
910 .ndo_get_stats = rtl8139_get_stats, 910 .ndo_get_stats = rtl8139_get_stats,
911 .ndo_change_mtu = eth_change_mtu,
911 .ndo_validate_addr = eth_validate_addr, 912 .ndo_validate_addr = eth_validate_addr,
912 .ndo_set_mac_address = rtl8139_set_mac_address, 913 .ndo_set_mac_address = rtl8139_set_mac_address,
913 .ndo_start_xmit = rtl8139_start_xmit, 914 .ndo_start_xmit = rtl8139_start_xmit,
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c155bd3ec9f1..5f6509a5f640 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1729,6 +1729,13 @@ config KS8842
1729 help 1729 help
1730 This platform driver is for Micrel KSZ8842 chip. 1730 This platform driver is for Micrel KSZ8842 chip.
1731 1731
1732config KS8851
1733 tristate "Micrel KS8851 SPI"
1734 depends on SPI
1735 select MII
1736 help
1737 SPI driver for Micrel KS8851 SPI attached network chip.
1738
1732config VIA_RHINE 1739config VIA_RHINE
1733 tristate "VIA Rhine support" 1740 tristate "VIA Rhine support"
1734 depends on NET_PCI && PCI 1741 depends on NET_PCI && PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4b58a59f211b..ead8cab3cfe1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_SKGE) += skge.o
88obj-$(CONFIG_SKY2) += sky2.o 88obj-$(CONFIG_SKY2) += sky2.o
89obj-$(CONFIG_SKFP) += skfp/ 89obj-$(CONFIG_SKFP) += skfp/
90obj-$(CONFIG_KS8842) += ks8842.o 90obj-$(CONFIG_KS8842) += ks8842.o
91obj-$(CONFIG_KS8851) += ks8851.o
91obj-$(CONFIG_VIA_RHINE) += via-rhine.o 92obj-$(CONFIG_VIA_RHINE) += via-rhine.o
92obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 93obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
93obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 94obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 85a18175730b..08787f5a22a3 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -569,16 +569,8 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
569 569
570#ifdef DEBUG_DRIVER 570#ifdef DEBUG_DRIVER
571 /* dump the packet */ 571 /* dump the packet */
572 { 572 print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
573 int i; 573 16, 1, skb->data, 64, true);
574
575 for (i = 0; i < 64; i++) {
576 if ((i % 16) == 0)
577 printk("\n" KERN_DEBUG);
578 printk ("%2.2x ", skb->data [i]);
579 }
580 printk("\n");
581 }
582#endif 574#endif
583 entry = lp->tx_new & lp->tx_ring_mod_mask; 575 entry = lp->tx_new & lp->tx_ring_mod_mask;
584 ib->btx_ring [entry].length = (-skblen) | 0xf000; 576 ib->btx_ring [entry].length = (-skblen) | 0xf000;
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d6d4ab3b430c..7d227cdab9f8 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -158,15 +158,12 @@ module_exit(arcnet_exit);
158void arcnet_dump_skb(struct net_device *dev, 158void arcnet_dump_skb(struct net_device *dev,
159 struct sk_buff *skb, char *desc) 159 struct sk_buff *skb, char *desc)
160{ 160{
161 int i; 161 char hdr[32];
162 162
163 printk(KERN_DEBUG "%6s: skb dump (%s) follows:", dev->name, desc); 163 /* dump the packet */
164 for (i = 0; i < skb->len; i++) { 164 snprintf(hdr, sizeof(hdr), "%6s:%s skb->data:", dev->name, desc);
165 if (i % 16 == 0) 165 print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
166 printk("\n" KERN_DEBUG "[%04X] ", i); 166 16, 1, skb->data, skb->len, true);
167 printk("%02X ", ((u_char *) skb->data)[i]);
168 }
169 printk("\n");
170} 167}
171 168
172EXPORT_SYMBOL(arcnet_dump_skb); 169EXPORT_SYMBOL(arcnet_dump_skb);
@@ -184,6 +181,7 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum,
184 int i, length; 181 int i, length;
185 unsigned long flags = 0; 182 unsigned long flags = 0;
186 static uint8_t buf[512]; 183 static uint8_t buf[512];
184 char hdr[32];
187 185
188 /* hw.copy_from_card expects IRQ context so take the IRQ lock 186 /* hw.copy_from_card expects IRQ context so take the IRQ lock
189 to keep it single threaded */ 187 to keep it single threaded */
@@ -197,14 +195,10 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum,
197 /* if the offset[0] byte is nonzero, this is a 256-byte packet */ 195 /* if the offset[0] byte is nonzero, this is a 256-byte packet */
198 length = (buf[2] ? 256 : 512); 196 length = (buf[2] ? 256 : 512);
199 197
200 printk(KERN_DEBUG "%6s: packet dump (%s) follows:", dev->name, desc); 198 /* dump the packet */
201 for (i = 0; i < length; i++) { 199 snprintf(hdr, sizeof(hdr), "%6s:%s packet dump:", dev->name, desc);
202 if (i % 16 == 0) 200 print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
203 printk("\n" KERN_DEBUG "[%04X] ", i); 201 16, 1, buf, length, true);
204 printk("%02X ", buf[i]);
205 }
206 printk("\n");
207
208} 202}
209 203
210#else 204#else
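The two hunks above, in a2065.c and arcnet.c, replace open-coded hex-dump loops with the kernel's print_hex_dump() helper. A minimal sketch of that helper outside either driver follows; the function and buffer names are hypothetical, only the print_hex_dump() call itself is the real API.

    #include <linux/printk.h>
    #include <linux/types.h>

    static void dump_frame(const u8 *buf, size_t len)
    {
            /* 16 bytes per row, 1-byte groups, offset column on the left, ASCII on the right */
            print_hex_dump(KERN_DEBUG, "frame: ", DUMP_PREFIX_OFFSET,
                           16, 1, buf, len, true);
    }

print_hex_dump() emits one complete log line per row, which is why the manual "\n" KERN_DEBUG continuations in the old loops can simply be dropped.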
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 2895db13bfa4..c37ee9e6b67b 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -63,3 +63,11 @@ config IXP4XX_ETH
63 help 63 help
64 Say Y here if you want to use built-in Ethernet ports 64 Say Y here if you want to use built-in Ethernet ports
65 on IXP4xx processor. 65 on IXP4xx processor.
66
67config W90P910_ETH
68 tristate "Nuvoton w90p910 Ethernet support"
69 depends on ARM && ARCH_W90X900
70 select PHYLIB
71 help
72 Say Y here if you want to use built-in Ethernet ports
73 on w90p910 processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index 811a3ccd14c1..303171f589e6 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
11obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o 11obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
12obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o 12obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
13obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o 13obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
14obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 2e7419a61191..5041d10bae9d 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -1228,7 +1228,6 @@ static int at91ether_resume(struct platform_device *pdev)
1228#endif 1228#endif
1229 1229
1230static struct platform_driver at91ether_driver = { 1230static struct platform_driver at91ether_driver = {
1231 .probe = at91ether_probe,
1232 .remove = __devexit_p(at91ether_remove), 1231 .remove = __devexit_p(at91ether_remove),
1233 .suspend = at91ether_suspend, 1232 .suspend = at91ether_suspend,
1234 .resume = at91ether_resume, 1233 .resume = at91ether_resume,
@@ -1240,7 +1239,7 @@ static struct platform_driver at91ether_driver = {
1240 1239
1241static int __init at91ether_init(void) 1240static int __init at91ether_init(void)
1242{ 1241{
1243 return platform_driver_register(&at91ether_driver); 1242 return platform_driver_probe(&at91ether_driver, at91ether_probe);
1244} 1243}
1245 1244
1246static void __exit at91ether_exit(void) 1245static void __exit at91ether_exit(void)
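The at91_ether change above removes .probe from the platform_driver and registers it with platform_driver_probe() instead; that helper binds only devices that already exist at registration time, so the probe routine can live in __init memory and be discarded after boot. A rough sketch of the pattern with hypothetical names:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init foo_probe(struct platform_device *pdev)
    {
            /* one-shot probe: devices hot-added later will not be bound */
            return 0;
    }

    static struct platform_driver foo_driver = {
            /* .probe intentionally not set */
            .driver = {
                    .name  = "foo",
                    .owner = THIS_MODULE,
            },
    };

    static int __init foo_init(void)
    {
            return platform_driver_probe(&foo_driver, foo_probe);
    }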
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6f42ad728915..3fe09876e76d 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -1142,7 +1142,9 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
1142 .ndo_start_xmit = eth_xmit, 1142 .ndo_start_xmit = eth_xmit,
1143 .ndo_set_multicast_list = eth_set_mcast_list, 1143 .ndo_set_multicast_list = eth_set_mcast_list,
1144 .ndo_do_ioctl = eth_ioctl, 1144 .ndo_do_ioctl = eth_ioctl,
1145 1145 .ndo_change_mtu = eth_change_mtu,
1146 .ndo_set_mac_address = eth_mac_addr,
1147 .ndo_validate_addr = eth_validate_addr,
1146}; 1148};
1147 1149
1148static int __devinit eth_init_one(struct platform_device *pdev) 1150static int __devinit eth_init_one(struct platform_device *pdev)
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
new file mode 100644
index 000000000000..616fb7985a34
--- /dev/null
+++ b/drivers/net/arm/w90p910_ether.c
@@ -0,0 +1,1105 @@
1/*
2 * Copyright (c) 2008-2009 Nuvoton technology corporation.
3 *
4 * Wan ZongShun <mcuos.com@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/mii.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/skbuff.h>
18#include <linux/ethtool.h>
19#include <linux/platform_device.h>
20#include <linux/clk.h>
21
22#define DRV_MODULE_NAME "w90p910-emc"
23#define DRV_MODULE_VERSION "0.1"
24
25/* Ethernet MAC Registers */
26#define REG_CAMCMR 0x00
27#define REG_CAMEN 0x04
28#define REG_CAMM_BASE 0x08
29#define REG_CAML_BASE 0x0c
30#define REG_TXDLSA 0x88
31#define REG_RXDLSA 0x8C
32#define REG_MCMDR 0x90
33#define REG_MIID 0x94
34#define REG_MIIDA 0x98
35#define REG_FFTCR 0x9C
36#define REG_TSDR 0xa0
37#define REG_RSDR 0xa4
38#define REG_DMARFC 0xa8
39#define REG_MIEN 0xac
40#define REG_MISTA 0xb0
41#define REG_CTXDSA 0xcc
42#define REG_CTXBSA 0xd0
43#define REG_CRXDSA 0xd4
44#define REG_CRXBSA 0xd8
45
46/* mac controller bit */
47#define MCMDR_RXON 0x01
48#define MCMDR_ACP (0x01 << 3)
49#define MCMDR_SPCRC (0x01 << 5)
50#define MCMDR_TXON (0x01 << 8)
51#define MCMDR_FDUP (0x01 << 18)
52#define MCMDR_ENMDC (0x01 << 19)
53#define MCMDR_OPMOD (0x01 << 20)
54#define SWR (0x01 << 24)
55
56/* cam command register */
57#define CAMCMR_AUP 0x01
58#define CAMCMR_AMP (0x01 << 1)
59#define CAMCMR_ABP (0x01 << 2)
60#define CAMCMR_CCAM (0x01 << 3)
61#define CAMCMR_ECMP (0x01 << 4)
62#define CAM0EN 0x01
63
64/* mac mii controller bit */
65#define MDCCR (0x0a << 20)
66#define PHYAD (0x01 << 8)
67#define PHYWR (0x01 << 16)
68#define PHYBUSY (0x01 << 17)
69#define PHYPRESP (0x01 << 18)
70#define CAM_ENTRY_SIZE 0x08
71
72/* rx and tx status */
73#define TXDS_TXCP (0x01 << 19)
74#define RXDS_CRCE (0x01 << 17)
75#define RXDS_PTLE (0x01 << 19)
76#define RXDS_RXGD (0x01 << 20)
77#define RXDS_ALIE (0x01 << 21)
78#define RXDS_RP (0x01 << 22)
79
80/* mac interrupt status*/
81#define MISTA_EXDEF (0x01 << 19)
82#define MISTA_TXBERR (0x01 << 24)
83#define MISTA_TDU (0x01 << 23)
84#define MISTA_RDU (0x01 << 10)
85#define MISTA_RXBERR (0x01 << 11)
86
87#define ENSTART 0x01
88#define ENRXINTR 0x01
89#define ENRXGD (0x01 << 4)
90#define ENRXBERR (0x01 << 11)
91#define ENTXINTR (0x01 << 16)
92#define ENTXCP (0x01 << 18)
93#define ENTXABT (0x01 << 21)
94#define ENTXBERR (0x01 << 24)
95#define ENMDC (0x01 << 19)
96#define PHYBUSY (0x01 << 17)
97#define MDCCR_VAL 0xa00000
98
99/* rx and tx owner bit */
100#define RX_OWEN_DMA (0x01 << 31)
101#define RX_OWEN_CPU (~(0x03 << 30))
102#define TX_OWEN_DMA (0x01 << 31)
103#define TX_OWEN_CPU (~(0x01 << 31))
104
105/* tx frame desc controller bit */
106#define MACTXINTEN 0x04
107#define CRCMODE 0x02
108#define PADDINGMODE 0x01
109
110/* fftcr controller bit */
111#define TXTHD (0x03 << 8)
112#define BLENGTH (0x01 << 20)
113
114/* global setting for driver */
115#define RX_DESC_SIZE 50
116#define TX_DESC_SIZE 10
117#define MAX_RBUFF_SZ 0x600
118#define MAX_TBUFF_SZ 0x600
119#define TX_TIMEOUT 50
120#define DELAY 1000
121#define CAM0 0x0
122
123static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);
124
125struct w90p910_rxbd {
126 unsigned int sl;
127 unsigned int buffer;
128 unsigned int reserved;
129 unsigned int next;
130};
131
132struct w90p910_txbd {
133 unsigned int mode;
134 unsigned int buffer;
135 unsigned int sl;
136 unsigned int next;
137};
138
139struct recv_pdesc {
140 struct w90p910_rxbd desclist[RX_DESC_SIZE];
141 char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
142};
143
144struct tran_pdesc {
145 struct w90p910_txbd desclist[TX_DESC_SIZE];
146 char tran_buf[RX_DESC_SIZE][MAX_TBUFF_SZ];
147};
148
149struct w90p910_ether {
150 struct recv_pdesc *rdesc;
151 struct recv_pdesc *rdesc_phys;
152 struct tran_pdesc *tdesc;
153 struct tran_pdesc *tdesc_phys;
154 struct net_device_stats stats;
155 struct platform_device *pdev;
156 struct sk_buff *skb;
157 struct clk *clk;
158 struct clk *rmiiclk;
159 struct mii_if_info mii;
160 struct timer_list check_timer;
161 void __iomem *reg;
162 unsigned int rxirq;
163 unsigned int txirq;
164 unsigned int cur_tx;
165 unsigned int cur_rx;
166 unsigned int finish_tx;
167 unsigned int rx_packets;
168 unsigned int rx_bytes;
169 unsigned int start_tx_ptr;
170 unsigned int start_rx_ptr;
171 unsigned int linkflag;
172 spinlock_t lock;
173};
174
175static void update_linkspeed_register(struct net_device *dev,
176 unsigned int speed, unsigned int duplex)
177{
178 struct w90p910_ether *ether = netdev_priv(dev);
179 unsigned int val;
180
181 val = __raw_readl(ether->reg + REG_MCMDR);
182
183 if (speed == SPEED_100) {
184 /* 100 full/half duplex */
185 if (duplex == DUPLEX_FULL) {
186 val |= (MCMDR_OPMOD | MCMDR_FDUP);
187 } else {
188 val |= MCMDR_OPMOD;
189 val &= ~MCMDR_FDUP;
190 }
191 } else {
192 /* 10 full/half duplex */
193 if (duplex == DUPLEX_FULL) {
194 val |= MCMDR_FDUP;
195 val &= ~MCMDR_OPMOD;
196 } else {
197 val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
198 }
199 }
200
201 __raw_writel(val, ether->reg + REG_MCMDR);
202}
203
204static void update_linkspeed(struct net_device *dev)
205{
206 struct w90p910_ether *ether = netdev_priv(dev);
207 struct platform_device *pdev;
208 unsigned int bmsr, bmcr, lpa, speed, duplex;
209
210 pdev = ether->pdev;
211
212 if (!mii_link_ok(&ether->mii)) {
213 ether->linkflag = 0x0;
214 netif_carrier_off(dev);
215 dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
216 return;
217 }
218
219 if (ether->linkflag == 1)
220 return;
221
222 bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
223 bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);
224
225 if (bmcr & BMCR_ANENABLE) {
226 if (!(bmsr & BMSR_ANEGCOMPLETE))
227 return;
228
229 lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);
230
231 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
232 speed = SPEED_100;
233 else
234 speed = SPEED_10;
235
236 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
237 duplex = DUPLEX_FULL;
238 else
239 duplex = DUPLEX_HALF;
240
241 } else {
242 speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
243 duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
244 }
245
246 update_linkspeed_register(dev, speed, duplex);
247
248 dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
249 (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
250 ether->linkflag = 0x01;
251
252 netif_carrier_on(dev);
253}
254
255static void w90p910_check_link(unsigned long dev_id)
256{
257 struct net_device *dev = (struct net_device *) dev_id;
258 struct w90p910_ether *ether = netdev_priv(dev);
259
260 update_linkspeed(dev);
261 mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
262}
263
264static void w90p910_write_cam(struct net_device *dev,
265 unsigned int x, unsigned char *pval)
266{
267 struct w90p910_ether *ether = netdev_priv(dev);
268 unsigned int msw, lsw;
269
270 msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];
271
272 lsw = (pval[4] << 24) | (pval[5] << 16);
273
274 __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
275 __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
276}
277
278static void w90p910_init_desc(struct net_device *dev)
279{
280 struct w90p910_ether *ether;
281 struct w90p910_txbd *tdesc, *tdesc_phys;
282 struct w90p910_rxbd *rdesc, *rdesc_phys;
283 unsigned int i, j;
284
285 ether = netdev_priv(dev);
286
287 ether->tdesc = (struct tran_pdesc *)
288 dma_alloc_coherent(NULL, sizeof(struct tran_pdesc),
289 (dma_addr_t *) &ether->tdesc_phys, GFP_KERNEL);
290
291 ether->rdesc = (struct recv_pdesc *)
292 dma_alloc_coherent(NULL, sizeof(struct recv_pdesc),
293 (dma_addr_t *) &ether->rdesc_phys, GFP_KERNEL);
294
295 for (i = 0; i < TX_DESC_SIZE; i++) {
296 tdesc = &(ether->tdesc->desclist[i]);
297
298 j = ((i + 1) / TX_DESC_SIZE);
299
300 if (j != 0) {
301 tdesc_phys = &(ether->tdesc_phys->desclist[0]);
302 ether->start_tx_ptr = (unsigned int)tdesc_phys;
303 tdesc->next = (unsigned int)ether->start_tx_ptr;
304 } else {
305 tdesc_phys = &(ether->tdesc_phys->desclist[i+1]);
306 tdesc->next = (unsigned int)tdesc_phys;
307 }
308
309 tdesc->buffer = (unsigned int)ether->tdesc_phys->tran_buf[i];
310 tdesc->sl = 0;
311 tdesc->mode = 0;
312 }
313
314 for (i = 0; i < RX_DESC_SIZE; i++) {
315 rdesc = &(ether->rdesc->desclist[i]);
316
317 j = ((i + 1) / RX_DESC_SIZE);
318
319 if (j != 0) {
320 rdesc_phys = &(ether->rdesc_phys->desclist[0]);
321 ether->start_rx_ptr = (unsigned int)rdesc_phys;
322 rdesc->next = (unsigned int)ether->start_rx_ptr;
323 } else {
324 rdesc_phys = &(ether->rdesc_phys->desclist[i+1]);
325 rdesc->next = (unsigned int)rdesc_phys;
326 }
327
328 rdesc->sl = RX_OWEN_DMA;
329 rdesc->buffer = (unsigned int)ether->rdesc_phys->recv_buf[i];
330 }
331}
332
333static void w90p910_set_fifo_threshold(struct net_device *dev)
334{
335 struct w90p910_ether *ether = netdev_priv(dev);
336 unsigned int val;
337
338 val = TXTHD | BLENGTH;
339 __raw_writel(val, ether->reg + REG_FFTCR);
340}
341
342static void w90p910_return_default_idle(struct net_device *dev)
343{
344 struct w90p910_ether *ether = netdev_priv(dev);
345 unsigned int val;
346
347 val = __raw_readl(ether->reg + REG_MCMDR);
348 val |= SWR;
349 __raw_writel(val, ether->reg + REG_MCMDR);
350}
351
352static void w90p910_trigger_rx(struct net_device *dev)
353{
354 struct w90p910_ether *ether = netdev_priv(dev);
355
356 __raw_writel(ENSTART, ether->reg + REG_RSDR);
357}
358
359static void w90p910_trigger_tx(struct net_device *dev)
360{
361 struct w90p910_ether *ether = netdev_priv(dev);
362
363 __raw_writel(ENSTART, ether->reg + REG_TSDR);
364}
365
366static void w90p910_enable_mac_interrupt(struct net_device *dev)
367{
368 struct w90p910_ether *ether = netdev_priv(dev);
369 unsigned int val;
370
371 val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
372 val |= ENTXBERR | ENRXBERR | ENTXABT;
373
374 __raw_writel(val, ether->reg + REG_MIEN);
375}
376
377static void w90p910_get_and_clear_int(struct net_device *dev,
378 unsigned int *val)
379{
380 struct w90p910_ether *ether = netdev_priv(dev);
381
382 *val = __raw_readl(ether->reg + REG_MISTA);
383 __raw_writel(*val, ether->reg + REG_MISTA);
384}
385
386static void w90p910_set_global_maccmd(struct net_device *dev)
387{
388 struct w90p910_ether *ether = netdev_priv(dev);
389 unsigned int val;
390
391 val = __raw_readl(ether->reg + REG_MCMDR);
392 val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
393 __raw_writel(val, ether->reg + REG_MCMDR);
394}
395
396static void w90p910_enable_cam(struct net_device *dev)
397{
398 struct w90p910_ether *ether = netdev_priv(dev);
399 unsigned int val;
400
401 w90p910_write_cam(dev, CAM0, dev->dev_addr);
402
403 val = __raw_readl(ether->reg + REG_CAMEN);
404 val |= CAM0EN;
405 __raw_writel(val, ether->reg + REG_CAMEN);
406}
407
408static void w90p910_enable_cam_command(struct net_device *dev)
409{
410 struct w90p910_ether *ether = netdev_priv(dev);
411 unsigned int val;
412
413 val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP;
414 __raw_writel(val, ether->reg + REG_CAMCMR);
415}
416
417static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
418{
419 struct w90p910_ether *ether = netdev_priv(dev);
420 unsigned int val;
421
422 val = __raw_readl(ether->reg + REG_MCMDR);
423
424 if (enable)
425 val |= MCMDR_TXON;
426 else
427 val &= ~MCMDR_TXON;
428
429 __raw_writel(val, ether->reg + REG_MCMDR);
430}
431
432static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
433{
434 struct w90p910_ether *ether = netdev_priv(dev);
435 unsigned int val;
436
437 val = __raw_readl(ether->reg + REG_MCMDR);
438
439 if (enable)
440 val |= MCMDR_RXON;
441 else
442 val &= ~MCMDR_RXON;
443
444 __raw_writel(val, ether->reg + REG_MCMDR);
445}
446
447static void w90p910_set_curdest(struct net_device *dev)
448{
449 struct w90p910_ether *ether = netdev_priv(dev);
450
451 __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
452 __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
453}
454
455static void w90p910_reset_mac(struct net_device *dev)
456{
457 struct w90p910_ether *ether = netdev_priv(dev);
458
459 spin_lock(&ether->lock);
460
461 w90p910_enable_tx(dev, 0);
462 w90p910_enable_rx(dev, 0);
463 w90p910_set_fifo_threshold(dev);
464 w90p910_return_default_idle(dev);
465
466 if (!netif_queue_stopped(dev))
467 netif_stop_queue(dev);
468
469 w90p910_init_desc(dev);
470
471 dev->trans_start = jiffies;
472 ether->cur_tx = 0x0;
473 ether->finish_tx = 0x0;
474 ether->cur_rx = 0x0;
475
476 w90p910_set_curdest(dev);
477 w90p910_enable_cam(dev);
478 w90p910_enable_cam_command(dev);
479 w90p910_enable_mac_interrupt(dev);
480 w90p910_enable_tx(dev, 1);
481 w90p910_enable_rx(dev, 1);
482 w90p910_trigger_tx(dev);
483 w90p910_trigger_rx(dev);
484
485 dev->trans_start = jiffies;
486
487 if (netif_queue_stopped(dev))
488 netif_wake_queue(dev);
489
490 spin_unlock(&ether->lock);
491}
492
493static void w90p910_mdio_write(struct net_device *dev,
494 int phy_id, int reg, int data)
495{
496 struct w90p910_ether *ether = netdev_priv(dev);
497 struct platform_device *pdev;
498 unsigned int val, i;
499
500 pdev = ether->pdev;
501
502 __raw_writel(data, ether->reg + REG_MIID);
503
504 val = (phy_id << 0x08) | reg;
505 val |= PHYBUSY | PHYWR | MDCCR_VAL;
506 __raw_writel(val, ether->reg + REG_MIIDA);
507
508 for (i = 0; i < DELAY; i++) {
509 if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
510 break;
511 }
512
513 if (i == DELAY)
514 dev_warn(&pdev->dev, "mdio write timed out\n");
515}
516
517static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
518{
519 struct w90p910_ether *ether = netdev_priv(dev);
520 struct platform_device *pdev;
521 unsigned int val, i, data;
522
523 pdev = ether->pdev;
524
525 val = (phy_id << 0x08) | reg;
526 val |= PHYBUSY | MDCCR_VAL;
527 __raw_writel(val, ether->reg + REG_MIIDA);
528
529 for (i = 0; i < DELAY; i++) {
530 if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
531 break;
532 }
533
534 if (i == DELAY) {
535 dev_warn(&pdev->dev, "mdio read timed out\n");
536 data = 0xffff;
537 } else {
538 data = __raw_readl(ether->reg + REG_MIID);
539 }
540
541 return data;
542}
543
544static int set_mac_address(struct net_device *dev, void *addr)
545{
546 struct sockaddr *address = addr;
547
548 if (!is_valid_ether_addr(address->sa_data))
549 return -EADDRNOTAVAIL;
550
551 memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
552 w90p910_write_cam(dev, CAM0, dev->dev_addr);
553
554 return 0;
555}
556
557static int w90p910_ether_close(struct net_device *dev)
558{
559 struct w90p910_ether *ether = netdev_priv(dev);
560
561 dma_free_writecombine(NULL, sizeof(struct w90p910_rxbd),
562 ether->rdesc, (dma_addr_t)ether->rdesc_phys);
563 dma_free_writecombine(NULL, sizeof(struct w90p910_txbd),
564 ether->tdesc, (dma_addr_t)ether->tdesc_phys);
565
566 netif_stop_queue(dev);
567
568 del_timer_sync(&ether->check_timer);
569 clk_disable(ether->rmiiclk);
570 clk_disable(ether->clk);
571
572 free_irq(ether->txirq, dev);
573 free_irq(ether->rxirq, dev);
574
575 return 0;
576}
577
578static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
579{
580 struct w90p910_ether *ether;
581
582 ether = netdev_priv(dev);
583
584 return &ether->stats;
585}
586
587static int w90p910_send_frame(struct net_device *dev,
588 unsigned char *data, int length)
589{
590 struct w90p910_ether *ether;
591 struct w90p910_txbd *txbd;
592 struct platform_device *pdev;
593 unsigned char *buffer;
594
595 ether = netdev_priv(dev);
596 pdev = ether->pdev;
597
598 txbd = &ether->tdesc->desclist[ether->cur_tx];
599 buffer = ether->tdesc->tran_buf[ether->cur_tx];
600 if (length > 1514) {
601 dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
602 length = 1514;
603 }
604
605 txbd->sl = length & 0xFFFF;
606
607 memcpy(buffer, data, length);
608
609 txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;
610
611 w90p910_enable_tx(dev, 1);
612
613 w90p910_trigger_tx(dev);
614
615 ether->cur_tx = (ether->cur_tx+1) % TX_DESC_SIZE;
616 txbd = &ether->tdesc->desclist[ether->cur_tx];
617
618 dev->trans_start = jiffies;
619
620 if (txbd->mode & TX_OWEN_DMA)
621 netif_stop_queue(dev);
622
623 return 0;
624}
625
626static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
627{
628 struct w90p910_ether *ether = netdev_priv(dev);
629
630 if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
631 ether->skb = skb;
632 dev_kfree_skb_irq(skb);
633 return 0;
634 }
635 return -1;
636}
637
638static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
639{
640 struct w90p910_ether *ether;
641 struct w90p910_txbd *txbd;
642 struct platform_device *pdev;
643 struct tran_pdesc *tran_pdesc;
644 struct net_device *dev;
645 unsigned int cur_entry, entry, status;
646
647 dev = (struct net_device *)dev_id;
648 ether = netdev_priv(dev);
649 pdev = ether->pdev;
650
651 spin_lock(&ether->lock);
652
653 w90p910_get_and_clear_int(dev, &status);
654
655 cur_entry = __raw_readl(ether->reg + REG_CTXDSA);
656
657 tran_pdesc = ether->tdesc_phys;
658 entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
659
660 while (entry != cur_entry) {
661 txbd = &ether->tdesc->desclist[ether->finish_tx];
662
663 ether->finish_tx = (ether->finish_tx + 1) % TX_DESC_SIZE;
664
665 if (txbd->sl & TXDS_TXCP) {
666 ether->stats.tx_packets++;
667 ether->stats.tx_bytes += txbd->sl & 0xFFFF;
668 } else {
669 ether->stats.tx_errors++;
670 }
671
672 txbd->sl = 0x0;
673 txbd->mode = 0x0;
674
675 if (netif_queue_stopped(dev))
676 netif_wake_queue(dev);
677
678 entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
679 }
680
681 if (status & MISTA_EXDEF) {
682 dev_err(&pdev->dev, "emc defer exceed interrupt\n");
683 } else if (status & MISTA_TXBERR) {
684 dev_err(&pdev->dev, "emc bus error interrupt\n");
685 w90p910_reset_mac(dev);
686 } else if (status & MISTA_TDU) {
687 if (netif_queue_stopped(dev))
688 netif_wake_queue(dev);
689 }
690
691 spin_unlock(&ether->lock);
692
693 return IRQ_HANDLED;
694}
695
696static void netdev_rx(struct net_device *dev)
697{
698 struct w90p910_ether *ether;
699 struct w90p910_rxbd *rxbd;
700 struct platform_device *pdev;
701 struct recv_pdesc *rdesc_phys;
702 struct sk_buff *skb;
703 unsigned char *data;
704 unsigned int length, status, val, entry;
705
706 ether = netdev_priv(dev);
707 pdev = ether->pdev;
708 rdesc_phys = ether->rdesc_phys;
709
710 rxbd = &ether->rdesc->desclist[ether->cur_rx];
711
712 do {
713 val = __raw_readl(ether->reg + REG_CRXDSA);
714 entry = (unsigned int)&rdesc_phys->desclist[ether->cur_rx];
715
716 if (val == entry)
717 break;
718
719 status = rxbd->sl;
720 length = status & 0xFFFF;
721
722 if (status & RXDS_RXGD) {
723 data = ether->rdesc->recv_buf[ether->cur_rx];
724 skb = dev_alloc_skb(length+2);
725 if (!skb) {
726 dev_err(&pdev->dev, "get skb buffer error\n");
727 ether->stats.rx_dropped++;
728 return;
729 }
730
731 skb->dev = dev;
732 skb_reserve(skb, 2);
733 skb_put(skb, length);
734 skb_copy_to_linear_data(skb, data, length);
735 skb->protocol = eth_type_trans(skb, dev);
736 ether->stats.rx_packets++;
737 ether->stats.rx_bytes += length;
738 netif_rx(skb);
739 } else {
740 ether->stats.rx_errors++;
741
742 if (status & RXDS_RP) {
743 dev_err(&pdev->dev, "rx runt err\n");
744 ether->stats.rx_length_errors++;
745 } else if (status & RXDS_CRCE) {
746 dev_err(&pdev->dev, "rx crc err\n");
747 ether->stats.rx_crc_errors++;
748 }
749
750 if (status & RXDS_ALIE) {
751 dev_err(&pdev->dev, "rx alignment err\n");
752 ether->stats.rx_frame_errors++;
753 } else if (status & RXDS_PTLE) {
754 dev_err(&pdev->dev, "rx longer err\n");
755 ether->stats.rx_over_errors++;
756 }
757 }
758
759 rxbd->sl = RX_OWEN_DMA;
760 rxbd->reserved = 0x0;
761 ether->cur_rx = (ether->cur_rx+1) % RX_DESC_SIZE;
762 rxbd = &ether->rdesc->desclist[ether->cur_rx];
763
764 dev->last_rx = jiffies;
765 } while (1);
766}
767
768static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
769{
770 struct net_device *dev;
771 struct w90p910_ether *ether;
772 struct platform_device *pdev;
773 unsigned int status;
774
775 dev = (struct net_device *)dev_id;
776 ether = netdev_priv(dev);
777 pdev = ether->pdev;
778
779 spin_lock(&ether->lock);
780
781 w90p910_get_and_clear_int(dev, &status);
782
783 if (status & MISTA_RDU) {
784 netdev_rx(dev);
785
786 w90p910_trigger_rx(dev);
787
788 spin_unlock(&ether->lock);
789 return IRQ_HANDLED;
790 } else if (status & MISTA_RXBERR) {
791 dev_err(&pdev->dev, "emc rx bus error\n");
792 w90p910_reset_mac(dev);
793 }
794
795 netdev_rx(dev);
796 spin_unlock(&ether->lock);
797 return IRQ_HANDLED;
798}
799
800static int w90p910_ether_open(struct net_device *dev)
801{
802 struct w90p910_ether *ether;
803 struct platform_device *pdev;
804
805 ether = netdev_priv(dev);
806 pdev = ether->pdev;
807
808 w90p910_reset_mac(dev);
809 w90p910_set_fifo_threshold(dev);
810 w90p910_set_curdest(dev);
811 w90p910_enable_cam(dev);
812 w90p910_enable_cam_command(dev);
813 w90p910_enable_mac_interrupt(dev);
814 w90p910_set_global_maccmd(dev);
815 w90p910_enable_rx(dev, 1);
816
817 ether->rx_packets = 0x0;
818 ether->rx_bytes = 0x0;
819
820 if (request_irq(ether->txirq, w90p910_tx_interrupt,
821 0x0, pdev->name, dev)) {
822 dev_err(&pdev->dev, "register irq tx failed\n");
823 return -EAGAIN;
824 }
825
826 if (request_irq(ether->rxirq, w90p910_rx_interrupt,
827 0x0, pdev->name, dev)) {
828 dev_err(&pdev->dev, "register irq rx failed\n");
829 return -EAGAIN;
830 }
831
832 mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
833 netif_start_queue(dev);
834 w90p910_trigger_rx(dev);
835
836 dev_info(&pdev->dev, "%s is OPENED\n", dev->name);
837
838 return 0;
839}
840
841static void w90p910_ether_set_multicast_list(struct net_device *dev)
842{
843 struct w90p910_ether *ether;
844 unsigned int rx_mode;
845
846 ether = netdev_priv(dev);
847
848 if (dev->flags & IFF_PROMISC)
849 rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
850 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list)
851 rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
852 else
853 rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
854 __raw_writel(rx_mode, ether->reg + REG_CAMCMR);
855}
856
857static int w90p910_ether_ioctl(struct net_device *dev,
858 struct ifreq *ifr, int cmd)
859{
860 struct w90p910_ether *ether = netdev_priv(dev);
861 struct mii_ioctl_data *data = if_mii(ifr);
862
863 return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
864}
865
866static void w90p910_get_drvinfo(struct net_device *dev,
867 struct ethtool_drvinfo *info)
868{
869 strcpy(info->driver, DRV_MODULE_NAME);
870 strcpy(info->version, DRV_MODULE_VERSION);
871}
872
873static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
874{
875 struct w90p910_ether *ether = netdev_priv(dev);
876 return mii_ethtool_gset(&ether->mii, cmd);
877}
878
879static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
880{
881 struct w90p910_ether *ether = netdev_priv(dev);
882 return mii_ethtool_sset(&ether->mii, cmd);
883}
884
885static int w90p910_nway_reset(struct net_device *dev)
886{
887 struct w90p910_ether *ether = netdev_priv(dev);
888 return mii_nway_restart(&ether->mii);
889}
890
891static u32 w90p910_get_link(struct net_device *dev)
892{
893 struct w90p910_ether *ether = netdev_priv(dev);
894 return mii_link_ok(&ether->mii);
895}
896
897static const struct ethtool_ops w90p910_ether_ethtool_ops = {
898 .get_settings = w90p910_get_settings,
899 .set_settings = w90p910_set_settings,
900 .get_drvinfo = w90p910_get_drvinfo,
901 .nway_reset = w90p910_nway_reset,
902 .get_link = w90p910_get_link,
903};
904
905static const struct net_device_ops w90p910_ether_netdev_ops = {
906 .ndo_open = w90p910_ether_open,
907 .ndo_stop = w90p910_ether_close,
908 .ndo_start_xmit = w90p910_ether_start_xmit,
909 .ndo_get_stats = w90p910_ether_stats,
910 .ndo_set_multicast_list = w90p910_ether_set_multicast_list,
911 .ndo_set_mac_address = set_mac_address,
912 .ndo_do_ioctl = w90p910_ether_ioctl,
913 .ndo_validate_addr = eth_validate_addr,
914 .ndo_change_mtu = eth_change_mtu,
915};
916
917static void __init get_mac_address(struct net_device *dev)
918{
919 struct w90p910_ether *ether = netdev_priv(dev);
920 struct platform_device *pdev;
921 char addr[6];
922
923 pdev = ether->pdev;
924
925 addr[0] = 0x00;
926 addr[1] = 0x02;
927 addr[2] = 0xac;
928 addr[3] = 0x55;
929 addr[4] = 0x88;
930 addr[5] = 0xa8;
931
932 if (is_valid_ether_addr(addr))
933 memcpy(dev->dev_addr, &addr, 0x06);
934 else
935 dev_err(&pdev->dev, "invalid mac address\n");
936}
937
938static int w90p910_ether_setup(struct net_device *dev)
939{
940 struct w90p910_ether *ether = netdev_priv(dev);
941
942 ether_setup(dev);
943 dev->netdev_ops = &w90p910_ether_netdev_ops;
944 dev->ethtool_ops = &w90p910_ether_ethtool_ops;
945
946 dev->tx_queue_len = 16;
947 dev->dma = 0x0;
948 dev->watchdog_timeo = TX_TIMEOUT;
949
950 get_mac_address(dev);
951
952 spin_lock_init(&ether->lock);
953
954 ether->cur_tx = 0x0;
955 ether->cur_rx = 0x0;
956 ether->finish_tx = 0x0;
957 ether->linkflag = 0x0;
958 ether->mii.phy_id = 0x01;
959 ether->mii.phy_id_mask = 0x1f;
960 ether->mii.reg_num_mask = 0x1f;
961 ether->mii.dev = dev;
962 ether->mii.mdio_read = w90p910_mdio_read;
963 ether->mii.mdio_write = w90p910_mdio_write;
964
965 setup_timer(&ether->check_timer, w90p910_check_link,
966 (unsigned long)dev);
967
968 return 0;
969}
970
971static int __devinit w90p910_ether_probe(struct platform_device *pdev)
972{
973 struct w90p910_ether *ether;
974 struct net_device *dev;
975 struct resource *res;
976 int error;
977
978 dev = alloc_etherdev(sizeof(struct w90p910_ether));
979 if (!dev)
980 return -ENOMEM;
981
982 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
983 if (res == NULL) {
984 dev_err(&pdev->dev, "failed to get I/O memory\n");
985 error = -ENXIO;
986 goto failed_free;
987 }
988
989 res = request_mem_region(res->start, resource_size(res), pdev->name);
990 if (res == NULL) {
991 dev_err(&pdev->dev, "failed to request I/O memory\n");
992 error = -EBUSY;
993 goto failed_free;
994 }
995
996 ether = netdev_priv(dev);
997
998 ether->reg = ioremap(res->start, resource_size(res));
999 if (ether->reg == NULL) {
1000 dev_err(&pdev->dev, "failed to remap I/O memory\n");
1001 error = -ENXIO;
1002 goto failed_free_mem;
1003 }
1004
1005 ether->txirq = platform_get_irq(pdev, 0);
1006 if (ether->txirq < 0) {
1007 dev_err(&pdev->dev, "failed to get ether tx irq\n");
1008 error = -ENXIO;
1009 goto failed_free_io;
1010 }
1011
1012 ether->rxirq = platform_get_irq(pdev, 1);
1013 if (ether->rxirq < 0) {
1014 dev_err(&pdev->dev, "failed to get ether rx irq\n");
1015 error = -ENXIO;
1016 goto failed_free_txirq;
1017 }
1018
1019 platform_set_drvdata(pdev, dev);
1020
1021 ether->clk = clk_get(&pdev->dev, NULL);
1022 if (IS_ERR(ether->clk)) {
1023 dev_err(&pdev->dev, "failed to get ether clock\n");
1024 error = PTR_ERR(ether->clk);
1025 goto failed_free_rxirq;
1026 }
1027
1028 ether->rmiiclk = clk_get(&pdev->dev, "RMII");
1029 if (IS_ERR(ether->rmiiclk)) {
1030 dev_err(&pdev->dev, "failed to get ether clock\n");
1031 error = PTR_ERR(ether->rmiiclk);
1032 goto failed_put_clk;
1033 }
1034
1035 ether->pdev = pdev;
1036
1037 w90p910_ether_setup(dev);
1038
1039 error = register_netdev(dev);
1040 if (error != 0) {
1041 dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
1042 error = -ENODEV;
1043 goto failed_put_rmiiclk;
1044 }
1045
1046 return 0;
1047failed_put_rmiiclk:
1048 clk_put(ether->rmiiclk);
1049failed_put_clk:
1050 clk_put(ether->clk);
1051failed_free_rxirq:
1052 free_irq(ether->rxirq, pdev);
1053 platform_set_drvdata(pdev, NULL);
1054failed_free_txirq:
1055 free_irq(ether->txirq, pdev);
1056failed_free_io:
1057 iounmap(ether->reg);
1058failed_free_mem:
1059 release_mem_region(res->start, resource_size(res));
1060failed_free:
1061 free_netdev(dev);
1062 return error;
1063}
1064
1065static int __devexit w90p910_ether_remove(struct platform_device *pdev)
1066{
1067 struct net_device *dev = platform_get_drvdata(pdev);
1068 struct w90p910_ether *ether = netdev_priv(dev);
1069
1070 unregister_netdev(dev);
1071 clk_put(ether->rmiiclk);
1072 clk_put(ether->clk);
1073 del_timer_sync(&ether->check_timer);
1074 platform_set_drvdata(pdev, NULL);
1075 free_netdev(dev);
1076 return 0;
1077}
1078
1079static struct platform_driver w90p910_ether_driver = {
1080 .probe = w90p910_ether_probe,
1081 .remove = __devexit_p(w90p910_ether_remove),
1082 .driver = {
1083 .name = "w90p910-emc",
1084 .owner = THIS_MODULE,
1085 },
1086};
1087
1088static int __init w90p910_ether_init(void)
1089{
1090 return platform_driver_register(&w90p910_ether_driver);
1091}
1092
1093static void __exit w90p910_ether_exit(void)
1094{
1095 platform_driver_unregister(&w90p910_ether_driver);
1096}
1097
1098module_init(w90p910_ether_init);
1099module_exit(w90p910_ether_exit);
1100
1101MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
1102MODULE_DESCRIPTION("w90p910 MAC driver!");
1103MODULE_LICENSE("GPL");
1104MODULE_ALIAS("platform:w90p910-emc");
1105
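One detail of the new driver worth spelling out: in w90p910_init_desc() the expression j = ((i + 1) / TX_DESC_SIZE) is non-zero only for the last descriptor, so that branch is what closes the ring back to entry 0. A sketch of the same wrap logic written more directly, using the structures declared in the file and trimming the loop body to the link field only:

    for (i = 0; i < TX_DESC_SIZE; i++) {
            struct w90p910_txbd *tdesc = &ether->tdesc->desclist[i];

            if (i == TX_DESC_SIZE - 1)      /* last slot: point back to the first */
                    tdesc->next = (unsigned int)&ether->tdesc_phys->desclist[0];
            else
                    tdesc->next = (unsigned int)&ether->tdesc_phys->desclist[i + 1];
    }

The RX ring is linked the same way with RX_DESC_SIZE.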
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 18b566ad4fd1..cf30e278f182 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -318,7 +318,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr)
318 pos3 = mca_read_stored_pos( slot, 3 ); 318 pos3 = mca_read_stored_pos( slot, 3 );
319 pos4 = mca_read_stored_pos( slot, 4 ); 319 pos4 = mca_read_stored_pos( slot, 4 );
320 320
321 for (l_i = 0; l_i < 0x09; l_i++) 321 for (l_i = 0; l_i < 8; l_i++)
322 if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i]) 322 if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i])
323 break; 323 break;
324 ioaddr = at1700_mca_probe_list[l_i]; 324 ioaddr = at1700_mca_probe_list[l_i];
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index e1658ef3fcdf..2a1120ad2e74 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -188,14 +188,14 @@ struct atl1c_tpd_ext_desc {
188#define RRS_HDS_TYPE_DATA 2 188#define RRS_HDS_TYPE_DATA 2
189 189
190#define RRS_IS_NO_HDS_TYPE(flag) \ 190#define RRS_IS_NO_HDS_TYPE(flag) \
191 (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == 0) 191 ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
192 192
193#define RRS_IS_HDS_HEAD(flag) \ 193#define RRS_IS_HDS_HEAD(flag) \
194 (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \ 194 ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
195 RRS_HDS_TYPE_HEAD) 195 RRS_HDS_TYPE_HEAD)
196 196
197#define RRS_IS_HDS_DATA(flag) \ 197#define RRS_IS_HDS_DATA(flag) \
198 (((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \ 198 ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
199 RRS_HDS_TYPE_DATA) 199 RRS_HDS_TYPE_DATA)
200 200
201/* rrs word 3 bit 0:31 */ 201/* rrs word 3 bit 0:31 */
@@ -245,7 +245,7 @@ struct atl1c_tpd_ext_desc {
245#define RRS_PACKET_TYPE_802_3 1 245#define RRS_PACKET_TYPE_802_3 1
246#define RRS_PACKET_TYPE_ETH 0 246#define RRS_PACKET_TYPE_ETH 0
247#define RRS_PACKET_IS_ETH(word) \ 247#define RRS_PACKET_IS_ETH(word) \
248 (((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK == \ 248 ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
249 RRS_PACKET_TYPE_ETH) 249 RRS_PACKET_TYPE_ETH)
250#define RRS_RXD_IS_VALID(word) \ 250#define RRS_RXD_IS_VALID(word) \
251 ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1) 251 ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)
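The atl1c.h hunk is an operator-precedence fix: in C, == binds more tightly than &, so "flag & MASK == 0" parses as "flag & (MASK == 0)". A tiny standalone illustration (the values are arbitrary, not taken from the driver):

    #include <stdio.h>
    #define MASK 0x3

    int main(void)
    {
            unsigned int flag = 0x4;                /* no bits inside MASK are set */
            printf("%d\n", flag & MASK == 0);       /* flag & (MASK == 0) -> 0x4 & 0 -> 0 */
            printf("%d\n", (flag & MASK) == 0);     /* (0x4 & 0x3) == 0   -> 1 */
            return 0;
    }

Only the fully parenthesized form actually tests the masked bits, which is what the new macros do.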
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index e4afbd628c23..607007d75b6f 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
281 if (wol->wolopts & WAKE_PHY) 281 if (wol->wolopts & WAKE_PHY)
282 adapter->wol |= AT_WUFC_LNKC; 282 adapter->wol |= AT_WUFC_LNKC;
283 283
284 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
285
284 return 0; 286 return 0;
285} 287}
286 288
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index cd547a205fb9..a383122679de 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1689,7 +1689,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
1689 if (likely(RRS_RXD_IS_VALID(rrs->word3))) { 1689 if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
1690 rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) & 1690 rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
1691 RRS_RX_RFD_CNT_MASK; 1691 RRS_RX_RFD_CNT_MASK;
1692 if (unlikely(rfd_num) != 1) 1692 if (unlikely(rfd_num != 1))
1693 /* TODO support mul rfd*/ 1693 /* TODO support mul rfd*/
1694 if (netif_msg_rx_err(adapter)) 1694 if (netif_msg_rx_err(adapter))
1695 dev_warn(&pdev->dev, 1695 dev_warn(&pdev->dev,
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 619c6583e1aa..4003955d7a96 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
365 if (wol->wolopts & WAKE_PHY) 365 if (wol->wolopts & WAKE_PHY)
366 adapter->wol |= AT_WUFC_LNKC; 366 adapter->wol |= AT_WUFC_LNKC;
367 367
368 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
369
368 return 0; 370 return 0;
369} 371}
370 372
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index c734b1983ec1..204db961029e 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -2071,7 +2071,7 @@ static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2071 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) 2071 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
2072 return -EOPNOTSUPP; 2072 return -EOPNOTSUPP;
2073 2073
2074 if (wol->wolopts & (WAKE_MCAST|WAKE_BCAST|WAKE_MCAST)) 2074 if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
2075 return -EOPNOTSUPP; 2075 return -EOPNOTSUPP;
2076 2076
2077 /* these settings will always override what we currently have */ 2077 /* these settings will always override what we currently have */
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f703758f0a6e..5b4bf3d2cdc2 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev)
73#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 73#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
74 74
75#define BE_MAX_LRO_DESCRIPTORS 16 75#define BE_MAX_LRO_DESCRIPTORS 16
76#define BE_MAX_FRAGS_PER_FRAME 16 76#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
77 77
78struct be_dma_mem { 78struct be_dma_mem {
79 void *va; 79 void *va;
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9592f22e4c8c..cccc5419ad72 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
162 return -EINVAL; 162 return -EINVAL;
163 163
164 adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; 164 adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
165 if (adapter->max_rx_coal > MAX_SKB_FRAGS) 165 if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
166 adapter->max_rx_coal = MAX_SKB_FRAGS - 1; 166 adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
167 167
168 /* if AIC is being turned on now, start with an EQD of 0 */ 168 /* if AIC is being turned on now, start with an EQD of 0 */
169 if (rx_eq->enable_aic == 0 && 169 if (rx_eq->enable_aic == 0 &&
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b02e805c1db3..29c33c709c6d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -55,6 +55,10 @@
55#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */ 55#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */
56#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26 56#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26
57 57
58/********* ISR0 Register offset **********/
59#define CEV_ISR0_OFFSET 0xC18
60#define CEV_ISR_SIZE 4
61
58/********* Event Q door bell *************/ 62/********* Event Q door bell *************/
59#define DB_EQ_OFFSET DB_CQ_OFFSET 63#define DB_EQ_OFFSET DB_CQ_OFFSET
60#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ 64#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66c10c87f517..dea3155688bb 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -666,8 +666,8 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
666{ 666{
667 struct be_queue_info *rxq = &adapter->rx_obj.q; 667 struct be_queue_info *rxq = &adapter->rx_obj.q;
668 struct be_rx_page_info *page_info; 668 struct be_rx_page_info *page_info;
669 u16 rxq_idx, i, num_rcvd; 669 u16 rxq_idx, i, num_rcvd, j;
670 u32 pktsize, hdr_len, curr_frag_len; 670 u32 pktsize, hdr_len, curr_frag_len, size;
671 u8 *start; 671 u8 *start;
672 672
673 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 673 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -708,23 +708,34 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
708 } 708 }
709 709
710 /* More frags present for this completion */ 710 /* More frags present for this completion */
711 pktsize -= curr_frag_len; /* account for above copied frag */ 711 size = pktsize;
712 for (i = 1; i < num_rcvd; i++) { 712 for (i = 1, j = 0; i < num_rcvd; i++) {
713 size -= curr_frag_len;
713 index_inc(&rxq_idx, rxq->len); 714 index_inc(&rxq_idx, rxq->len);
714 page_info = get_rx_page_info(adapter, rxq_idx); 715 page_info = get_rx_page_info(adapter, rxq_idx);
715 716
716 curr_frag_len = min(pktsize, rx_frag_size); 717 curr_frag_len = min(size, rx_frag_size);
718
719 /* Coalesce all frags from the same physical page in one slot */
720 if (page_info->page_offset == 0) {
721 /* Fresh page */
722 j++;
723 skb_shinfo(skb)->frags[j].page = page_info->page;
724 skb_shinfo(skb)->frags[j].page_offset =
725 page_info->page_offset;
726 skb_shinfo(skb)->frags[j].size = 0;
727 skb_shinfo(skb)->nr_frags++;
728 } else {
729 put_page(page_info->page);
730 }
717 731
718 skb_shinfo(skb)->frags[i].page = page_info->page; 732 skb_shinfo(skb)->frags[j].size += curr_frag_len;
719 skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
720 skb_shinfo(skb)->frags[i].size = curr_frag_len;
721 skb->len += curr_frag_len; 733 skb->len += curr_frag_len;
722 skb->data_len += curr_frag_len; 734 skb->data_len += curr_frag_len;
723 skb_shinfo(skb)->nr_frags++;
724 pktsize -= curr_frag_len;
725 735
726 memset(page_info, 0, sizeof(*page_info)); 736 memset(page_info, 0, sizeof(*page_info));
727 } 737 }
738 BUG_ON(j > MAX_SKB_FRAGS);
728 739
729done: 740done:
730 be_rx_stats_update(adapter, pktsize, num_rcvd); 741 be_rx_stats_update(adapter, pktsize, num_rcvd);
@@ -786,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
786 struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; 797 struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
787 struct be_queue_info *rxq = &adapter->rx_obj.q; 798 struct be_queue_info *rxq = &adapter->rx_obj.q;
788 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 799 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
789 u16 i, rxq_idx = 0, vid; 800 u16 i, rxq_idx = 0, vid, j;
790 801
791 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 802 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
792 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 803 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
@@ -794,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
794 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 805 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
795 806
796 remaining = pkt_size; 807 remaining = pkt_size;
797 for (i = 0; i < num_rcvd; i++) { 808 for (i = 0, j = -1; i < num_rcvd; i++) {
798 page_info = get_rx_page_info(adapter, rxq_idx); 809 page_info = get_rx_page_info(adapter, rxq_idx);
799 810
800 curr_frag_len = min(remaining, rx_frag_size); 811 curr_frag_len = min(remaining, rx_frag_size);
801 812
802 rx_frags[i].page = page_info->page; 813 /* Coalesce all frags from the same physical page in one slot */
803 rx_frags[i].page_offset = page_info->page_offset; 814 if (i == 0 || page_info->page_offset == 0) {
804 rx_frags[i].size = curr_frag_len; 815 /* First frag or Fresh page */
805 remaining -= curr_frag_len; 816 j++;
817 rx_frags[j].page = page_info->page;
818 rx_frags[j].page_offset = page_info->page_offset;
819 rx_frags[j].size = 0;
820 } else {
821 put_page(page_info->page);
822 }
823 rx_frags[j].size += curr_frag_len;
806 824
825 remaining -= curr_frag_len;
807 index_inc(&rxq_idx, rxq->len); 826 index_inc(&rxq_idx, rxq->len);
808
809 memset(page_info, 0, sizeof(*page_info)); 827 memset(page_info, 0, sizeof(*page_info));
810 } 828 }
829 BUG_ON(j > MAX_SKB_FRAGS);
811 830
812 if (likely(!vlanf)) { 831 if (likely(!vlanf)) {
813 lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, 832 lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
@@ -1255,15 +1274,17 @@ static irqreturn_t be_intx(int irq, void *dev)
1255{ 1274{
1256 struct be_adapter *adapter = dev; 1275 struct be_adapter *adapter = dev;
1257 struct be_ctrl_info *ctrl = &adapter->ctrl; 1276 struct be_ctrl_info *ctrl = &adapter->ctrl;
1258 int rx, tx; 1277 int isr;
1259 1278
1260 tx = event_handle(ctrl, &adapter->tx_eq); 1279 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
1261 rx = event_handle(ctrl, &adapter->rx_eq); 1280 ctrl->pci_func * CEV_ISR_SIZE);
1281 if (!isr)
1282 return IRQ_NONE;
1262 1283
1263 if (rx || tx) 1284 event_handle(ctrl, &adapter->tx_eq);
1264 return IRQ_HANDLED; 1285 event_handle(ctrl, &adapter->rx_eq);
1265 else 1286
1266 return IRQ_NONE; 1287 return IRQ_HANDLED;
1267} 1288}
1268 1289
1269static irqreturn_t be_msix_rx(int irq, void *dev) 1290static irqreturn_t be_msix_rx(int irq, void *dev)
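The skb_fill_rx_data() and be_rx_compl_process_lro() hunks above coalesce RX fragments that land in the same physical page into a single skb fragment slot instead of consuming one slot per hardware fragment, keeping nr_frags within MAX_SKB_FRAGS. A rough sketch of the idea with generic names (a hypothetical helper, not the driver's own code):

    #include <linux/skbuff.h>
    #include <linux/mm.h>

    /* Merge one hardware RX fragment into the frag array; *j walks skb frag
     * slots.  Fragments from the same page share one slot. */
    static void add_rx_frag(struct skb_frag_struct *frags, int *j,
                            struct page *page, u16 page_offset, u32 len)
    {
            if (page_offset == 0) {
                    (*j)++;                         /* fresh page: open a new slot */
                    frags[*j].page        = page;
                    frags[*j].page_offset = page_offset;
                    frags[*j].size        = 0;
            } else {
                    put_page(page);                 /* same page as slot *j: drop extra ref */
            }
            frags[*j].size += len;
    }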
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9578a3dfac01..206144f2470f 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -428,10 +428,11 @@ bmac_init_phy(struct net_device *dev)
428 printk(KERN_DEBUG "phy registers:"); 428 printk(KERN_DEBUG "phy registers:");
429 for (addr = 0; addr < 32; ++addr) { 429 for (addr = 0; addr < 32; ++addr) {
430 if ((addr & 7) == 0) 430 if ((addr & 7) == 0)
431 printk("\n" KERN_DEBUG); 431 printk(KERN_DEBUG);
432 printk(" %.4x", bmac_mif_read(dev, addr)); 432 printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
433 } 433 }
434 printk("\n"); 434 printk(KERN_CONT "\n");
435
435 if (bp->is_bmac_plus) { 436 if (bp->is_bmac_plus) {
436 unsigned int capable, ctrl; 437 unsigned int capable, ctrl;
437 438
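The bmac.c hunk switches the multi-part PHY register dump to KERN_CONT so continuation fragments are appended to the current line instead of being treated as new messages. A minimal sketch of the pattern (the register array and function are hypothetical):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void dump_phy_regs(const u16 *regs, int n)
    {
            int i;

            printk(KERN_DEBUG "phy registers:");
            for (i = 0; i < n; i++)
                    printk(KERN_CONT " %.4x", regs[i]);
            printk(KERN_CONT "\n");
    }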
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 8678457849f9..85a737c5c23f 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -902,6 +902,8 @@ struct bnx2x {
902 u16 rx_quick_cons_trip; 902 u16 rx_quick_cons_trip;
903 u16 rx_ticks_int; 903 u16 rx_ticks_int;
904 u16 rx_ticks; 904 u16 rx_ticks;
905/* Maximal coalescing timeout in us */
906#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
905 907
906 u32 lin_cnt; 908 u32 lin_cnt;
907 909
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ed648acef7cf..2ee581a2cdec 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -4212,13 +4212,14 @@ static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
4212u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, 4212u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
4213 u8 *version, u16 len) 4213 u8 *version, u16 len)
4214{ 4214{
4215 struct bnx2x *bp = params->bp; 4215 struct bnx2x *bp;
4216 u32 ext_phy_type = 0; 4216 u32 ext_phy_type = 0;
4217 u32 spirom_ver = 0; 4217 u32 spirom_ver = 0;
4218 u8 status = 0 ; 4218 u8 status = 0 ;
4219 4219
4220 if (version == NULL || params == NULL) 4220 if (version == NULL || params == NULL)
4221 return -EINVAL; 4221 return -EINVAL;
4222 bp = params->bp;
4222 4223
4223 spirom_ver = REG_RD(bp, params->shmem_base + 4224 spirom_ver = REG_RD(bp, params->shmem_base +
4224 offsetof(struct shmem_region, 4225 offsetof(struct shmem_region,
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fbf1352e9c1c..c36a5f33739f 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -484,8 +484,9 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
484 484
485 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); 485 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
486 mark = ((mark + 0x3) & ~0x3); 486 mark = ((mark + 0x3) & ~0x3);
487 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark); 487 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
488 488
489 printk(KERN_ERR PFX);
489 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { 490 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
490 for (word = 0; word < 8; word++) 491 for (word = 0; word < 8; word++)
491 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 492 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
@@ -500,7 +501,7 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
500 data[8] = 0x0; 501 data[8] = 0x0;
501 printk(KERN_CONT "%s", (char *)data); 502 printk(KERN_CONT "%s", (char *)data);
502 } 503 }
503 printk("\n" KERN_ERR PFX "end of fw dump\n"); 504 printk(KERN_ERR PFX "end of fw dump\n");
504} 505}
505 506
506static void bnx2x_panic_dump(struct bnx2x *bp) 507static void bnx2x_panic_dump(struct bnx2x *bp)
@@ -4434,7 +4435,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4434 REG_WR16(bp, BAR_USTRORM_INTMEM + 4435 REG_WR16(bp, BAR_USTRORM_INTMEM +
4435 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4436 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4436 U_SB_ETH_RX_CQ_INDEX), 4437 U_SB_ETH_RX_CQ_INDEX),
4437 bp->rx_ticks ? 0 : 1); 4438 (bp->rx_ticks/12) ? 0 : 1);
4438 4439
4439 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4440 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4440 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4441 REG_WR8(bp, BAR_CSTRORM_INTMEM +
@@ -4444,7 +4445,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4444 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4445 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4445 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4446 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4446 C_SB_ETH_TX_CQ_INDEX), 4447 C_SB_ETH_TX_CQ_INDEX),
4447 bp->tx_ticks ? 0 : 1); 4448 (bp->tx_ticks/12) ? 0 : 1);
4448 } 4449 }
4449} 4450}
4450 4451
@@ -7354,7 +7355,7 @@ static void bnx2x_reset_task(struct work_struct *work)
7354#ifdef BNX2X_STOP_ON_ERROR 7355#ifdef BNX2X_STOP_ON_ERROR
7355 BNX2X_ERR("reset task called but STOP_ON_ERROR defined" 7356 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7356 " so reset not done to allow debug dump,\n" 7357 " so reset not done to allow debug dump,\n"
7357 KERN_ERR " you will need to reboot when done\n"); 7358 " you will need to reboot when done\n");
7358 return; 7359 return;
7359#endif 7360#endif
7360 7361
@@ -8637,6 +8638,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
8637 return 0; 8638 return 0;
8638} 8639}
8639 8640
8641static u32
8642bnx2x_get_link(struct net_device *dev)
8643{
8644 struct bnx2x *bp = netdev_priv(dev);
8645
8646 return bp->link_vars.link_up;
8647}
8648
8640static int bnx2x_get_eeprom_len(struct net_device *dev) 8649static int bnx2x_get_eeprom_len(struct net_device *dev)
8641{ 8650{
8642 struct bnx2x *bp = netdev_priv(dev); 8651 struct bnx2x *bp = netdev_priv(dev);
@@ -9061,12 +9070,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
9061 struct bnx2x *bp = netdev_priv(dev); 9070 struct bnx2x *bp = netdev_priv(dev);
9062 9071
9063 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; 9072 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9064 if (bp->rx_ticks > 3000) 9073 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9065 bp->rx_ticks = 3000; 9074 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9066 9075
9067 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; 9076 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9068 if (bp->tx_ticks > 0x3000) 9077 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9069 bp->tx_ticks = 0x3000; 9078 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9070 9079
9071 if (netif_running(dev)) 9080 if (netif_running(dev))
9072 bnx2x_update_coalesce(bp); 9081 bnx2x_update_coalesce(bp);
@@ -10034,7 +10043,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
10034 .get_msglevel = bnx2x_get_msglevel, 10043 .get_msglevel = bnx2x_get_msglevel,
10035 .set_msglevel = bnx2x_set_msglevel, 10044 .set_msglevel = bnx2x_set_msglevel,
10036 .nway_reset = bnx2x_nway_reset, 10045 .nway_reset = bnx2x_nway_reset,
10037 .get_link = ethtool_op_get_link, 10046 .get_link = bnx2x_get_link,
10038 .get_eeprom_len = bnx2x_get_eeprom_len, 10047 .get_eeprom_len = bnx2x_get_eeprom_len,
10039 .get_eeprom = bnx2x_get_eeprom, 10048 .get_eeprom = bnx2x_get_eeprom,
10040 .set_eeprom = bnx2x_set_eeprom, 10049 .set_eeprom = bnx2x_set_eeprom,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d927f71af8a3..aa1be1feceed 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1459,8 +1459,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1459 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond 1459 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
1460 */ 1460 */
1461 if (bond->slave_cnt == 0) { 1461 if (bond->slave_cnt == 0) {
1462 if (slave_dev->type != ARPHRD_ETHER) 1462 if (bond_dev->type != slave_dev->type) {
1463 bond_setup_by_slave(bond_dev, slave_dev); 1463 dev_close(bond_dev);
1464 pr_debug("%s: change device type from %d to %d\n",
1465 bond_dev->name, bond_dev->type, slave_dev->type);
1466 if (slave_dev->type != ARPHRD_ETHER)
1467 bond_setup_by_slave(bond_dev, slave_dev);
1468 else
1469 ether_setup(bond_dev);
1470 dev_open(bond_dev);
1471 }
1464 } else if (bond_dev->type != slave_dev->type) { 1472 } else if (bond_dev->type != slave_dev->type) {
1465 pr_err(DRV_NAME ": %s ether type (%d) is different " 1473 pr_err(DRV_NAME ": %s ether type (%d) is different "
1466 "from other slaves (%d), can not enslave it.\n", 1474 "from other slaves (%d), can not enslave it.\n",
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 574daddc21bf..9e4283aff828 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -346,7 +346,7 @@ void can_restart(unsigned long data)
346 skb = dev_alloc_skb(sizeof(struct can_frame)); 346 skb = dev_alloc_skb(sizeof(struct can_frame));
347 if (skb == NULL) { 347 if (skb == NULL) {
348 err = -ENOMEM; 348 err = -ENOMEM;
349 goto out; 349 goto restart;
350 } 350 }
351 skb->dev = dev; 351 skb->dev = dev;
352 skb->protocol = htons(ETH_P_CAN); 352 skb->protocol = htons(ETH_P_CAN);
@@ -361,13 +361,13 @@ void can_restart(unsigned long data)
361 stats->rx_packets++; 361 stats->rx_packets++;
362 stats->rx_bytes += cf->can_dlc; 362 stats->rx_bytes += cf->can_dlc;
363 363
364restart:
364 dev_dbg(dev->dev.parent, "restarted\n"); 365 dev_dbg(dev->dev.parent, "restarted\n");
365 priv->can_stats.restarts++; 366 priv->can_stats.restarts++;
366 367
367 /* Now restart the device */ 368 /* Now restart the device */
368 err = priv->do_set_mode(dev, CAN_MODE_START); 369 err = priv->do_set_mode(dev, CAN_MODE_START);
369 370
370out:
371 netif_carrier_on(dev); 371 netif_carrier_on(dev);
372 if (err) 372 if (err)
373 dev_err(dev->dev.parent, "Error %d during restart", err); 373 dev_err(dev->dev.parent, "Error %d during restart", err);
@@ -473,6 +473,10 @@ int open_candev(struct net_device *dev)
473 return -EINVAL; 473 return -EINVAL;
474 } 474 }
475 475
476 /* Switch carrier on if device was stopped while in bus-off state */
477 if (!netif_carrier_ok(dev))
478 netif_carrier_on(dev);
479
476 setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); 480 setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
477 481
478 return 0; 482 return 0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 571f133a8fec..08ebee79d8a6 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -63,7 +63,6 @@
63#include <linux/can.h> 63#include <linux/can.h>
64#include <linux/can/dev.h> 64#include <linux/can/dev.h>
65#include <linux/can/error.h> 65#include <linux/can/error.h>
66#include <linux/can/dev.h>
67 66
68#include "sja1000.h" 67#include "sja1000.h"
69 68
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4d1515f45ba2..4869d77cbe91 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -227,7 +227,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
227 } 227 }
228 228
229 rcu_read_lock(); 229 rcu_read_lock();
230 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); 230 ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
231 if (ulp_ops) 231 if (ulp_ops)
232 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); 232 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
233 rcu_read_unlock(); 233 rcu_read_unlock();
@@ -319,6 +319,20 @@ static int cnic_abort_prep(struct cnic_sock *csk)
319 return 0; 319 return 0;
320} 320}
321 321
322static void cnic_uio_stop(void)
323{
324 struct cnic_dev *dev;
325
326 read_lock(&cnic_dev_lock);
327 list_for_each_entry(dev, &cnic_dev_list, list) {
328 struct cnic_local *cp = dev->cnic_priv;
329
330 if (cp->cnic_uinfo)
331 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
332 }
333 read_unlock(&cnic_dev_lock);
334}
335
322int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) 336int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
323{ 337{
324 struct cnic_dev *dev; 338 struct cnic_dev *dev;
@@ -390,6 +404,9 @@ int cnic_unregister_driver(int ulp_type)
390 } 404 }
391 read_unlock(&cnic_dev_lock); 405 read_unlock(&cnic_dev_lock);
392 406
407 if (ulp_type == CNIC_ULP_ISCSI)
408 cnic_uio_stop();
409
393 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); 410 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
394 411
395 mutex_unlock(&cnic_lock); 412 mutex_unlock(&cnic_lock);
@@ -632,7 +649,6 @@ static void cnic_free_resc(struct cnic_dev *dev)
632 int i = 0; 649 int i = 0;
633 650
634 if (cp->cnic_uinfo) { 651 if (cp->cnic_uinfo) {
635 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
636 while (cp->uio_dev != -1 && i < 15) { 652 while (cp->uio_dev != -1 && i < 15) {
637 msleep(100); 653 msleep(100);
638 i++; 654 i++;
@@ -1057,6 +1073,9 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
1057 struct cnic_local *cp = dev->cnic_priv; 1073 struct cnic_local *cp = dev->cnic_priv;
1058 int if_type; 1074 int if_type;
1059 1075
1076 if (cp->cnic_uinfo)
1077 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
1078
1060 rcu_read_lock(); 1079 rcu_read_lock();
1061 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 1080 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1062 struct cnic_ulp_ops *ulp_ops; 1081 struct cnic_ulp_ops *ulp_ops;
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 58afafbd3b9c..fd5e32cbcb87 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
1097 .ndo_start_xmit = cpmac_start_xmit, 1097 .ndo_start_xmit = cpmac_start_xmit,
1098 .ndo_tx_timeout = cpmac_tx_timeout, 1098 .ndo_tx_timeout = cpmac_tx_timeout,
1099 .ndo_set_multicast_list = cpmac_set_multicast_list, 1099 .ndo_set_multicast_list = cpmac_set_multicast_list,
1100 .ndo_so_ioctl = cpmac_ioctl, 1100 .ndo_do_ioctl = cpmac_ioctl,
1101 .ndo_set_config = cpmac_config, 1101 .ndo_set_config = cpmac_config,
1102 .ndo_change_mtu = eth_change_mtu, 1102 .ndo_change_mtu = eth_change_mtu,
1103 .ndo_validate_addr = eth_validate_addr, 1103 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 3eee666a9cd2..55445f980f9c 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1524,6 +1524,7 @@ static void net_timeout(struct net_device *dev)
1524static int net_send_packet(struct sk_buff *skb, struct net_device *dev) 1524static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
1525{ 1525{
1526 struct net_local *lp = netdev_priv(dev); 1526 struct net_local *lp = netdev_priv(dev);
1527 unsigned long flags;
1527 1528
1528 if (net_debug > 3) { 1529 if (net_debug > 3) {
1529 printk("%s: sent %d byte packet of type %x\n", 1530 printk("%s: sent %d byte packet of type %x\n",
@@ -1535,7 +1536,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
1535 ask the chip to start transmitting before the 1536 ask the chip to start transmitting before the
1536 whole packet has been completely uploaded. */ 1537 whole packet has been completely uploaded. */
1537 1538
1538 spin_lock_irq(&lp->lock); 1539 spin_lock_irqsave(&lp->lock, flags);
1539 netif_stop_queue(dev); 1540 netif_stop_queue(dev);
1540 1541
1541 /* initiate a transmit sequence */ 1542 /* initiate a transmit sequence */
@@ -1549,13 +1550,13 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
1549 * we're waiting for TxOk, so return 1 and requeue this packet. 1550 * we're waiting for TxOk, so return 1 and requeue this packet.
1550 */ 1551 */
1551 1552
1552 spin_unlock_irq(&lp->lock); 1553 spin_unlock_irqrestore(&lp->lock, flags);
1553 if (net_debug) printk("cs89x0: Tx buffer not free!\n"); 1554 if (net_debug) printk("cs89x0: Tx buffer not free!\n");
1554 return NETDEV_TX_BUSY; 1555 return NETDEV_TX_BUSY;
1555 } 1556 }
1556 /* Write the contents of the packet */ 1557 /* Write the contents of the packet */
1557 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1); 1558 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
1558 spin_unlock_irq(&lp->lock); 1559 spin_unlock_irqrestore(&lp->lock, flags);
1559 lp->stats.tx_bytes += skb->len; 1560 lp->stats.tx_bytes += skb->len;
1560 dev->trans_start = jiffies; 1561 dev->trans_start = jiffies;
1561 dev_kfree_skb (skb); 1562 dev_kfree_skb (skb);
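The cs89x0 hunk switches the transmit path from spin_lock_irq()/spin_unlock_irq() to the irqsave variants, so unlocking restores whatever interrupt state the caller had instead of unconditionally re-enabling IRQs (important when the xmit routine can be entered with interrupts already off, e.g. via netpoll). A minimal sketch of the idiom, assuming a 2.6-era driver; the structure and function names are placeholders.

/* Sketch (hypothetical driver): preserve the caller's IRQ state. */
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);	/* safe even if IRQs were off */
	netif_stop_queue(dev);
	/* ... kick off the hardware transmit ... */
	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}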
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 538dda4422dc..fb5df5c6203e 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -642,8 +642,7 @@ static int setup_sge_qsets(struct adapter *adap)
642 struct port_info *pi = netdev_priv(dev); 642 struct port_info *pi = netdev_priv(dev);
643 643
644 pi->qs = &adap->sge.qs[pi->first_qset]; 644 pi->qs = &adap->sge.qs[pi->first_qset];
645 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; 645 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
646 ++j, ++qset_idx) {
647 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO); 646 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
648 err = t3_sge_alloc_qset(adap, qset_idx, 1, 647 err = t3_sge_alloc_qset(adap, qset_idx, 1,
649 (adap->flags & USING_MSIX) ? qset_idx + 1 : 648 (adap->flags & USING_MSIX) ? qset_idx + 1 :
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2df8fb0af701..12fd446f9895 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1820,11 +1820,19 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1820 struct device *emac_dev = &priv->ndev->dev; 1820 struct device *emac_dev = &priv->ndev->dev;
1821 struct sockaddr *sa = addr; 1821 struct sockaddr *sa = addr;
1822 1822
1823 if (!is_valid_ether_addr(sa->sa_data))
1824 return -EINVAL;
1825
1823 /* Store mac addr in priv and rx channel and set it in EMAC hw */ 1826 /* Store mac addr in priv and rx channel and set it in EMAC hw */
1824 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); 1827 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
1825 memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
1826 memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); 1828 memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
1827 emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr); 1829
1830 /* If the interface is down - rxch is NULL. */
1831 /* MAC address is configured only after the interface is enabled. */
1832 if (netif_running(ndev)) {
1833 memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
1834 emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
1835 }
1828 1836
1829 if (netif_msg_drv(priv)) 1837 if (netif_msg_drv(priv))
1830 dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n", 1838 dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
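The emac_dev_setmac_addr() change validates the requested address and only programs the receive channel while the interface is up, since rxch is NULL when the device is down. A sketch of that general ndo_set_mac_address shape, with hypothetical driver names; is_valid_ether_addr() and netif_running() are the real helpers, and foo_hw_set_mac() stands in for the hardware write.

/* Sketch (hypothetical driver): reject bad addresses, always update the
 * software copy, and touch hardware only while the device is running.
 */
static int foo_set_mac_address(struct net_device *ndev, void *addr)
{
	struct foo_priv *priv = netdev_priv(ndev);
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EINVAL;		/* eth_mac_addr() uses -EADDRNOTAVAIL */

	memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);

	if (netif_running(ndev))
		foo_hw_set_mac(priv, ndev->dev_addr);	/* hypothetical HW write */

	return 0;
}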
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 895d72143ee0..4b6a219fecea 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -268,8 +268,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
268 printk(KERN_INFO "tx_coalesce:\t%d packets\n", 268 printk(KERN_INFO "tx_coalesce:\t%d packets\n",
269 tx_coalesce); 269 tx_coalesce);
270 if (np->coalesce) 270 if (np->coalesce)
271 printk(KERN_INFO "rx_coalesce:\t%d packets\n" 271 printk(KERN_INFO
272 KERN_INFO "rx_timeout: \t%d ns\n", 272 "rx_coalesce:\t%d packets\n"
273 "rx_timeout: \t%d ns\n",
273 np->rx_coalesce, np->rx_timeout*640); 274 np->rx_coalesce, np->rx_timeout*640);
274 if (np->vlan) 275 if (np->vlan)
275 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); 276 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
@@ -1522,9 +1523,9 @@ mii_get_media (struct net_device *dev)
1522 printk (KERN_INFO "Operating at 10 Mbps, "); 1523 printk (KERN_INFO "Operating at 10 Mbps, ");
1523 } 1524 }
1524 if (bmcr & MII_BMCR_DUPLEX_MODE) { 1525 if (bmcr & MII_BMCR_DUPLEX_MODE) {
1525 printk ("Full duplex\n"); 1526 printk (KERN_CONT "Full duplex\n");
1526 } else { 1527 } else {
1527 printk ("Half duplex\n"); 1528 printk (KERN_CONT "Half duplex\n");
1528 } 1529 }
1529 } 1530 }
1530 if (np->tx_flow) 1531 if (np->tx_flow)
@@ -1614,9 +1615,9 @@ mii_set_media (struct net_device *dev)
1614 } 1615 }
1615 if (np->full_duplex) { 1616 if (np->full_duplex) {
1616 bmcr |= MII_BMCR_DUPLEX_MODE; 1617 bmcr |= MII_BMCR_DUPLEX_MODE;
1617 printk ("Full duplex\n"); 1618 printk (KERN_CONT "Full duplex\n");
1618 } else { 1619 } else {
1619 printk ("Half duplex\n"); 1620 printk (KERN_CONT "Half duplex\n");
1620 } 1621 }
1621#if 0 1622#if 0
1622 /* Set 1000BaseT Master/Slave setting */ 1623 /* Set 1000BaseT Master/Slave setting */
@@ -1669,9 +1670,9 @@ mii_get_media_pcs (struct net_device *dev)
1669 __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR); 1670 __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
1670 printk (KERN_INFO "Operating at 1000 Mbps, "); 1671 printk (KERN_INFO "Operating at 1000 Mbps, ");
1671 if (bmcr & MII_BMCR_DUPLEX_MODE) { 1672 if (bmcr & MII_BMCR_DUPLEX_MODE) {
1672 printk ("Full duplex\n"); 1673 printk (KERN_CONT "Full duplex\n");
1673 } else { 1674 } else {
1674 printk ("Half duplex\n"); 1675 printk (KERN_CONT "Half duplex\n");
1675 } 1676 }
1676 } 1677 }
1677 if (np->tx_flow) 1678 if (np->tx_flow)
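Several hunks in this series (dl2k here, and epic100, fealnx and hamachi below) stop embedding a KERN_* level in the middle of a printk format string and mark continuation pieces with KERN_CONT instead, since a level prefix only has meaning at the very start of a message. A short sketch of the convention, with a hypothetical descriptor type:

/* Sketch: one message assembled from several printk() calls.  Only the
 * first call carries a level; the continuations use KERN_CONT.
 */
static void foo_dump_ring(const struct foo_desc *ring, int n)
{
	int i;

	printk(KERN_DEBUG "ring status:");
	for (i = 0; i < n; i++)
		printk(KERN_CONT " %8.8x", ring[i].status);
	printk(KERN_CONT "\n");
}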
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index efa680f4b8dd..41b648a67fec 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1897,6 +1897,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1897 1897
1898 if (ioread8(&nic->csr->scb.status) & rus_no_res) 1898 if (ioread8(&nic->csr->scb.status) & rus_no_res)
1899 nic->ru_running = RU_SUSPENDED; 1899 nic->ru_running = RU_SUSPENDED;
1900 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
1901 sizeof(struct rfd),
1902 PCI_DMA_BIDIRECTIONAL);
1900 return -ENODATA; 1903 return -ENODATA;
1901 } 1904 }
1902 1905
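The e100 hunk syncs the receive frame descriptor back to the device before returning -ENODATA, i.e. it hands ownership of the DMA buffer back to the hardware after the CPU has inspected it. A hedged sketch of that ownership handoff with the 2.6-era PCI DMA API; pdev and dma_addr are assumed to exist in the surrounding driver, and the matching sync_for_cpu normally sits where the CPU first reads the descriptor.

/* Sketch: CPU inspects a descriptor, then returns it to the device. */
pci_dma_sync_single_for_cpu(pdev, dma_addr, sizeof(struct rfd),
			    PCI_DMA_BIDIRECTIONAL);
/* ... read or modify the descriptor fields here ... */
pci_dma_sync_single_for_device(pdev, dma_addr, sizeof(struct rfd),
			       PCI_DMA_BIDIRECTIONAL);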
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e3356f8eb5a..5b8cbdb4b520 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2185 /* Free all the Rx ring sk_buffs */ 2185 /* Free all the Rx ring sk_buffs */
2186 for (i = 0; i < rx_ring->count; i++) { 2186 for (i = 0; i < rx_ring->count; i++) {
2187 buffer_info = &rx_ring->buffer_info[i]; 2187 buffer_info = &rx_ring->buffer_info[i];
2188 if (buffer_info->skb) { 2188 if (buffer_info->dma) {
2189 pci_unmap_single(pdev, 2189 pci_unmap_single(pdev,
2190 buffer_info->dma, 2190 buffer_info->dma,
2191 buffer_info->length, 2191 buffer_info->length,
2192 PCI_DMA_FROMDEVICE); 2192 PCI_DMA_FROMDEVICE);
2193 }
2194
2195 buffer_info->dma = 0;
2193 2196
2197 if (buffer_info->skb) {
2194 dev_kfree_skb(buffer_info->skb); 2198 dev_kfree_skb(buffer_info->skb);
2195 buffer_info->skb = NULL; 2199 buffer_info->skb = NULL;
2196 } 2200 }
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4033 buffer_info->dma, 4037 buffer_info->dma,
4034 buffer_info->length, 4038 buffer_info->length,
4035 PCI_DMA_FROMDEVICE); 4039 PCI_DMA_FROMDEVICE);
4040 buffer_info->dma = 0;
4036 4041
4037 length = le16_to_cpu(rx_desc->length); 4042 length = le16_to_cpu(rx_desc->length);
4038 /* !EOP means multiple descriptors were used to store a single 4043 /* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
4222 pci_unmap_single(pdev, buffer_info->dma, 4227 pci_unmap_single(pdev, buffer_info->dma,
4223 adapter->rx_buffer_len, 4228 adapter->rx_buffer_len,
4224 PCI_DMA_FROMDEVICE); 4229 PCI_DMA_FROMDEVICE);
4230 buffer_info->dma = 0;
4225 4231
4226 break; /* while !buffer_info->skb */ 4232 break; /* while !buffer_info->skb */
4227 } 4233 }
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4817 4823
4818 netif_device_detach(netdev); 4824 netif_device_detach(netdev);
4819 4825
4826 if (state == pci_channel_io_perm_failure)
4827 return PCI_ERS_RESULT_DISCONNECT;
4828
4820 if (netif_running(netdev)) 4829 if (netif_running(netdev))
4821 e1000_down(adapter); 4830 e1000_down(adapter);
4822 pci_disable_device(pdev); 4831 pci_disable_device(pdev);
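Both e1000 here and e1000e further down now return early from their PCI error handler when the channel state is pci_channel_io_perm_failure: the device is permanently gone, so further register or config-space accesses are pointless and recovery cannot succeed. A sketch of that handler shape; apart from the PCI error-recovery constants and helpers, the names are placeholders.

/* Sketch: PCI AER error callback for a hypothetical netdev driver. */
static pci_ers_result_t foo_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* device is lost */

	if (netif_running(netdev))
		foo_down(netdev);		/* hypothetical teardown */
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;	/* ask the core for a slot reset */
}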
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 8890c97e1120..c0f185beb8bc 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -238,6 +238,7 @@
238#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 238#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
239#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 239#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
240#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ 240#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
241#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
241#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ 242#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
242 243
243/* Constants used to interpret the masked PCI-X bus speed. */ 244/* Constants used to interpret the masked PCI-X bus speed. */
@@ -575,6 +576,8 @@
575#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ 576#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
576#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ 577#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
577 578
579#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
580
578/* NVM Control */ 581/* NVM Control */
579#define E1000_EECD_SK 0x00000001 /* NVM Clock */ 582#define E1000_EECD_SK 0x00000001 /* NVM Clock */
580#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ 583#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 163c1c0cfee7..fd44d9f90769 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -215,6 +215,7 @@ enum e1e_registers {
215 E1000_SWSM = 0x05B50, /* SW Semaphore */ 215 E1000_SWSM = 0x05B50, /* SW Semaphore */
216 E1000_FWSM = 0x05B54, /* FW Semaphore */ 216 E1000_FWSM = 0x05B54, /* FW Semaphore */
217 E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ 217 E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
218 E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */
218 E1000_HICR = 0x08F00, /* Host Interface Control */ 219 E1000_HICR = 0x08F00, /* Host Interface Control */
219}; 220};
220 221
@@ -302,6 +303,9 @@ enum e1e_registers {
302#define E1000_KMRNCTRLSTA_REN 0x00200000 303#define E1000_KMRNCTRLSTA_REN 0x00200000
303#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ 304#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
304#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ 305#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
306#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
307#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
308#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
305 309
306#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 310#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
307#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ 311#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e23f50fb9cd..d56c7473144a 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,6 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
338{ 338{
339 struct e1000_nvm_info *nvm = &hw->nvm; 339 struct e1000_nvm_info *nvm = &hw->nvm;
340 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 340 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
341 union ich8_hws_flash_status hsfsts;
341 u32 gfpreg; 342 u32 gfpreg;
342 u32 sector_base_addr; 343 u32 sector_base_addr;
343 u32 sector_end_addr; 344 u32 sector_end_addr;
@@ -374,6 +375,20 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
374 /* Adjust to word count */ 375 /* Adjust to word count */
375 nvm->flash_bank_size /= sizeof(u16); 376 nvm->flash_bank_size /= sizeof(u16);
376 377
378 /*
379 * Make sure the flash bank size does not overwrite the 4k
380 * sector ranges. We may have 64k allotted to us but we only care
381 * about the first 2 4k sectors. Therefore, if we have anything less
382 * than 64k set in the HSFSTS register, we will reduce the bank size
383 * down to 4k and let the rest remain unused. If berasesz == 3, then
384 * we are working in 64k mode. Otherwise we are not.
385 */
386 if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
387 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
388 if (hsfsts.hsf_status.berasesz != 3)
389 nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
390 }
391
377 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; 392 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
378 393
379 /* Clear shadow ram */ 394 /* Clear shadow ram */
@@ -446,6 +461,95 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
446 return 0; 461 return 0;
447} 462}
448 463
464/**
465 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
466 * @hw: pointer to the HW structure
467 *
468 * Checks to see of the link status of the hardware has changed. If a
469 * change in link status has been detected, then we read the PHY registers
470 * to get the current speed/duplex if link exists.
471 **/
472static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
473{
474 struct e1000_mac_info *mac = &hw->mac;
475 s32 ret_val;
476 bool link;
477
478 /*
479 * We only want to go out to the PHY registers to see if Auto-Neg
480 * has completed and/or if our link status has changed. The
481 * get_link_status flag is set upon receiving a Link Status
482 * Change or Rx Sequence Error interrupt.
483 */
484 if (!mac->get_link_status) {
485 ret_val = 0;
486 goto out;
487 }
488
489 if (hw->mac.type == e1000_pchlan) {
490 ret_val = e1000e_write_kmrn_reg(hw,
491 E1000_KMRNCTRLSTA_K1_CONFIG,
492 E1000_KMRNCTRLSTA_K1_ENABLE);
493 if (ret_val)
494 goto out;
495 }
496
497 /*
498 * First we want to see if the MII Status Register reports
499 * link. If so, then we want to get the current speed/duplex
500 * of the PHY.
501 */
502 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
503 if (ret_val)
504 goto out;
505
506 if (!link)
507 goto out; /* No link detected */
508
509 mac->get_link_status = false;
510
511 if (hw->phy.type == e1000_phy_82578) {
512 ret_val = e1000_link_stall_workaround_hv(hw);
513 if (ret_val)
514 goto out;
515 }
516
517 /*
518 * Check if there was DownShift, must be checked
519 * immediately after link-up
520 */
521 e1000e_check_downshift(hw);
522
523 /*
524 * If we are forcing speed/duplex, then we simply return since
525 * we have already determined whether we have link or not.
526 */
527 if (!mac->autoneg) {
528 ret_val = -E1000_ERR_CONFIG;
529 goto out;
530 }
531
532 /*
533 * Auto-Neg is enabled. Auto Speed Detection takes care
534 * of MAC speed/duplex configuration. So we only need to
535 * configure Collision Distance in the MAC.
536 */
537 e1000e_config_collision_dist(hw);
538
539 /*
540 * Configure Flow Control now that Auto-Neg has completed.
541 * First, we need to restore the desired flow control
542 * settings because we may have had to re-autoneg with a
543 * different link partner.
544 */
545 ret_val = e1000e_config_fc_after_link_up(hw);
546 if (ret_val)
547 hw_dbg(hw, "Error configuring flow control\n");
548
549out:
550 return ret_val;
551}
552
449static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 553static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
450{ 554{
451 struct e1000_hw *hw = &adapter->hw; 555 struct e1000_hw *hw = &adapter->hw;
@@ -694,6 +798,38 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
694} 798}
695 799
696/** 800/**
801 * e1000_lan_init_done_ich8lan - Check for PHY config completion
802 * @hw: pointer to the HW structure
803 *
804 * Check the appropriate indication the MAC has finished configuring the
805 * PHY after a software reset.
806 **/
807static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
808{
809 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
810
811 /* Wait for basic configuration completes before proceeding */
812 do {
813 data = er32(STATUS);
814 data &= E1000_STATUS_LAN_INIT_DONE;
815 udelay(100);
816 } while ((!data) && --loop);
817
818 /*
819 * If basic configuration is incomplete before the above loop
820 * count reaches 0, loading the configuration from NVM will
821 * leave the PHY in a bad state possibly resulting in no link.
822 */
823 if (loop == 0)
824 hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
825
826 /* Clear the Init Done bit for the next init event */
827 data = er32(STATUS);
828 data &= ~E1000_STATUS_LAN_INIT_DONE;
829 ew32(STATUS, data);
830}
831
832/**
697 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset 833 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
698 * @hw: pointer to the HW structure 834 * @hw: pointer to the HW structure
699 * 835 *
@@ -707,13 +843,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
707 u32 i; 843 u32 i;
708 u32 data, cnf_size, cnf_base_addr, sw_cfg_mask; 844 u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
709 s32 ret_val; 845 s32 ret_val;
710 u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
711 u16 word_addr, reg_data, reg_addr, phy_page = 0; 846 u16 word_addr, reg_data, reg_addr, phy_page = 0;
712 847
713 ret_val = e1000e_phy_hw_reset_generic(hw); 848 ret_val = e1000e_phy_hw_reset_generic(hw);
714 if (ret_val) 849 if (ret_val)
715 return ret_val; 850 return ret_val;
716 851
852 /* Allow time for h/w to get to a quiescent state after reset */
853 mdelay(10);
854
717 if (hw->mac.type == e1000_pchlan) { 855 if (hw->mac.type == e1000_pchlan) {
718 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 856 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
719 if (ret_val) 857 if (ret_val)
@@ -741,26 +879,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
741 if (!(data & sw_cfg_mask)) 879 if (!(data & sw_cfg_mask))
742 return 0; 880 return 0;
743 881
744 /* Wait for basic configuration completes before proceeding*/ 882 /* Wait for basic configuration completes before proceeding */
745 do { 883 e1000_lan_init_done_ich8lan(hw);
746 data = er32(STATUS);
747 data &= E1000_STATUS_LAN_INIT_DONE;
748 udelay(100);
749 } while ((!data) && --loop);
750
751 /*
752 * If basic configuration is incomplete before the above loop
753 * count reaches 0, loading the configuration from NVM will
754 * leave the PHY in a bad state possibly resulting in no link.
755 */
756 if (loop == 0) {
757 hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
758 }
759
760 /* Clear the Init Done bit for the next init event */
761 data = er32(STATUS);
762 data &= ~E1000_STATUS_LAN_INIT_DONE;
763 ew32(STATUS, data);
764 884
765 /* 885 /*
766 * Make sure HW does not configure LCD from PHY 886 * Make sure HW does not configure LCD from PHY
@@ -961,12 +1081,14 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
961 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 1081 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
962 ew32(PHY_CTRL, phy_ctrl); 1082 ew32(PHY_CTRL, phy_ctrl);
963 1083
1084 if (phy->type != e1000_phy_igp_3)
1085 return 0;
1086
964 /* 1087 /*
965 * Call gig speed drop workaround on LPLU before accessing 1088 * Call gig speed drop workaround on LPLU before accessing
966 * any PHY registers 1089 * any PHY registers
967 */ 1090 */
968 if ((hw->mac.type == e1000_ich8lan) && 1091 if (hw->mac.type == e1000_ich8lan)
969 (hw->phy.type == e1000_phy_igp_3))
970 e1000e_gig_downshift_workaround_ich8lan(hw); 1092 e1000e_gig_downshift_workaround_ich8lan(hw);
971 1093
972 /* When LPLU is enabled, we should disable SmartSpeed */ 1094 /* When LPLU is enabled, we should disable SmartSpeed */
@@ -979,6 +1101,9 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
979 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 1101 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
980 ew32(PHY_CTRL, phy_ctrl); 1102 ew32(PHY_CTRL, phy_ctrl);
981 1103
1104 if (phy->type != e1000_phy_igp_3)
1105 return 0;
1106
982 /* 1107 /*
983 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 1108 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
984 * during Dx states where the power conservation is most 1109 * during Dx states where the power conservation is most
@@ -1038,6 +1163,10 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1038 if (!active) { 1163 if (!active) {
1039 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 1164 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
1040 ew32(PHY_CTRL, phy_ctrl); 1165 ew32(PHY_CTRL, phy_ctrl);
1166
1167 if (phy->type != e1000_phy_igp_3)
1168 return 0;
1169
1041 /* 1170 /*
1042 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 1171 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1043 * during Dx states where the power conservation is most 1172 * during Dx states where the power conservation is most
@@ -1073,12 +1202,14 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1073 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 1202 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
1074 ew32(PHY_CTRL, phy_ctrl); 1203 ew32(PHY_CTRL, phy_ctrl);
1075 1204
1205 if (phy->type != e1000_phy_igp_3)
1206 return 0;
1207
1076 /* 1208 /*
1077 * Call gig speed drop workaround on LPLU before accessing 1209 * Call gig speed drop workaround on LPLU before accessing
1078 * any PHY registers 1210 * any PHY registers
1079 */ 1211 */
1080 if ((hw->mac.type == e1000_ich8lan) && 1212 if (hw->mac.type == e1000_ich8lan)
1081 (hw->phy.type == e1000_phy_igp_3))
1082 e1000e_gig_downshift_workaround_ich8lan(hw); 1213 e1000e_gig_downshift_workaround_ich8lan(hw);
1083 1214
1084 /* When LPLU is enabled, we should disable SmartSpeed */ 1215 /* When LPLU is enabled, we should disable SmartSpeed */
@@ -1905,7 +2036,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1905 break; 2036 break;
1906 case 1: 2037 case 1:
1907 sector_size = ICH_FLASH_SEG_SIZE_4K; 2038 sector_size = ICH_FLASH_SEG_SIZE_4K;
1908 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K; 2039 iteration = 1;
1909 break; 2040 break;
1910 case 2: 2041 case 2:
1911 if (hw->mac.type == e1000_ich9lan) { 2042 if (hw->mac.type == e1000_ich9lan) {
@@ -1917,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1917 break; 2048 break;
1918 case 3: 2049 case 3:
1919 sector_size = ICH_FLASH_SEG_SIZE_64K; 2050 sector_size = ICH_FLASH_SEG_SIZE_64K;
1920 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K; 2051 iteration = 1;
1921 break; 2052 break;
1922 default: 2053 default:
1923 return -E1000_ERR_NVM; 2054 return -E1000_ERR_NVM;
@@ -2143,6 +2274,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2143 ctrl = er32(CTRL); 2274 ctrl = er32(CTRL);
2144 2275
2145 if (!e1000_check_reset_block(hw)) { 2276 if (!e1000_check_reset_block(hw)) {
2277 /* Clear PHY Reset Asserted bit */
2278 if (hw->mac.type >= e1000_pchlan) {
2279 u32 status = er32(STATUS);
2280 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
2281 }
2282
2146 /* 2283 /*
2147 * PHY HW reset requires MAC CORE reset at the same 2284 * PHY HW reset requires MAC CORE reset at the same
2148 * time to make sure the interface between MAC and the 2285 * time to make sure the interface between MAC and the
@@ -2156,23 +2293,34 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2156 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 2293 ew32(CTRL, (ctrl | E1000_CTRL_RST));
2157 msleep(20); 2294 msleep(20);
2158 2295
2159 if (!ret_val) { 2296 if (!ret_val)
2160 /* release the swflag because it is not reset by
2161 * hardware reset
2162 */
2163 e1000_release_swflag_ich8lan(hw); 2297 e1000_release_swflag_ich8lan(hw);
2164 }
2165 2298
2166 ret_val = e1000e_get_auto_rd_done(hw); 2299 if (ctrl & E1000_CTRL_PHY_RST)
2167 if (ret_val) { 2300 ret_val = hw->phy.ops.get_cfg_done(hw);
2168 /* 2301
2169 * When auto config read does not complete, do not 2302 if (hw->mac.type >= e1000_ich10lan) {
2170 * return with an error. This can happen in situations 2303 e1000_lan_init_done_ich8lan(hw);
2171 * where there is no eeprom and prevents getting link. 2304 } else {
2172 */ 2305 ret_val = e1000e_get_auto_rd_done(hw);
2173 hw_dbg(hw, "Auto Read Done did not complete\n"); 2306 if (ret_val) {
2307 /*
2308 * When auto config read does not complete, do not
2309 * return with an error. This can happen in situations
2310 * where there is no eeprom and prevents getting link.
2311 */
2312 hw_dbg(hw, "Auto Read Done did not complete\n");
2313 }
2174 } 2314 }
2175 2315
2316 /*
2317 * For PCH, this write will make sure that any noise
2318 * will be detected as a CRC error and be dropped rather than show up
2319 * as a bad packet to the DMA engine.
2320 */
2321 if (hw->mac.type == e1000_pchlan)
2322 ew32(CRC_OFFSET, 0x65656565);
2323
2176 ew32(IMC, 0xffffffff); 2324 ew32(IMC, 0xffffffff);
2177 icr = er32(ICR); 2325 icr = er32(ICR);
2178 2326
@@ -2222,6 +2370,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
2222 for (i = 0; i < mac->mta_reg_count; i++) 2370 for (i = 0; i < mac->mta_reg_count; i++)
2223 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 2371 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
2224 2372
2373 /*
2374 * The 82578 Rx buffer will stall if wakeup is enabled in host and
2375 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
2376 * Reset the phy after disabling host wakeup to reset the Rx buffer.
2377 */
2378 if (hw->phy.type == e1000_phy_82578) {
2379 hw->phy.ops.read_phy_reg(hw, BM_WUC, &i);
2380 ret_val = e1000_phy_hw_reset_ich8lan(hw);
2381 if (ret_val)
2382 return ret_val;
2383 }
2384
2225 /* Setup link and flow control */ 2385 /* Setup link and flow control */
2226 ret_val = e1000_setup_link_ich8lan(hw); 2386 ret_val = e1000_setup_link_ich8lan(hw);
2227 2387
@@ -2254,16 +2414,6 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
2254 ew32(CTRL_EXT, ctrl_ext); 2414 ew32(CTRL_EXT, ctrl_ext);
2255 2415
2256 /* 2416 /*
2257 * The 82578 Rx buffer will stall if wakeup is enabled in host and
2258 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
2259 * Reset the phy after disabling host wakeup to reset the Rx buffer.
2260 */
2261 if (hw->phy.type == e1000_phy_82578) {
2262 e1e_rphy(hw, BM_WUC, &i);
2263 e1000e_phy_hw_reset_generic(hw);
2264 }
2265
2266 /*
2267 * Clear all of the statistics registers (clear on read). It is 2417 * Clear all of the statistics registers (clear on read). It is
2268 * important that we do this after we have tried to establish link 2418 * important that we do this after we have tried to establish link
2269 * because the symbol error count will increment wildly if there 2419 * because the symbol error count will increment wildly if there
@@ -2485,6 +2635,14 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
2485 if (ret_val) 2635 if (ret_val)
2486 return ret_val; 2636 return ret_val;
2487 2637
2638 if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
2639 ret_val = e1000e_write_kmrn_reg(hw,
2640 E1000_KMRNCTRLSTA_K1_CONFIG,
2641 E1000_KMRNCTRLSTA_K1_DISABLE);
2642 if (ret_val)
2643 return ret_val;
2644 }
2645
2488 if ((hw->mac.type == e1000_ich8lan) && 2646 if ((hw->mac.type == e1000_ich8lan) &&
2489 (hw->phy.type == e1000_phy_igp_3) && 2647 (hw->phy.type == e1000_phy_igp_3) &&
2490 (*speed == SPEED_1000)) { 2648 (*speed == SPEED_1000)) {
@@ -2850,6 +3008,16 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
2850{ 3008{
2851 u32 bank = 0; 3009 u32 bank = 0;
2852 3010
3011 if (hw->mac.type >= e1000_pchlan) {
3012 u32 status = er32(STATUS);
3013
3014 if (status & E1000_STATUS_PHYRA)
3015 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
3016 else
3017 hw_dbg(hw,
3018 "PHY Reset Asserted not set - needs delay\n");
3019 }
3020
2853 e1000e_get_cfg_done(hw); 3021 e1000e_get_cfg_done(hw);
2854 3022
2855 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 3023 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
@@ -2921,7 +3089,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
2921static struct e1000_mac_operations ich8_mac_ops = { 3089static struct e1000_mac_operations ich8_mac_ops = {
2922 .id_led_init = e1000e_id_led_init, 3090 .id_led_init = e1000e_id_led_init,
2923 .check_mng_mode = e1000_check_mng_mode_ich8lan, 3091 .check_mng_mode = e1000_check_mng_mode_ich8lan,
2924 .check_for_link = e1000e_check_for_copper_link, 3092 .check_for_link = e1000_check_for_copper_link_ich8lan,
2925 /* cleanup_led dependent on mac type */ 3093 /* cleanup_led dependent on mac type */
2926 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 3094 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
2927 .get_bus_info = e1000_get_bus_info_ich8lan, 3095 .get_bus_info = e1000_get_bus_info_ich8lan,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index be6d9e990374..99ba2b8a2a05 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -378,12 +378,6 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
378 378
379 mac->get_link_status = 0; 379 mac->get_link_status = 0;
380 380
381 if (hw->phy.type == e1000_phy_82578) {
382 ret_val = e1000_link_stall_workaround_hv(hw);
383 if (ret_val)
384 return ret_val;
385 }
386
387 /* 381 /*
388 * Check if there was DownShift, must be checked 382 * Check if there was DownShift, must be checked
389 * immediately after link-up 383 * immediately after link-up
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 679885a122b4..63415bb6f48f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4785 4785
4786 netif_device_detach(netdev); 4786 netif_device_detach(netdev);
4787 4787
4788 if (state == pci_channel_io_perm_failure)
4789 return PCI_ERS_RESULT_DISCONNECT;
4790
4788 if (netif_running(netdev)) 4791 if (netif_running(netdev))
4789 e1000e_down(adapter); 4792 e1000e_down(adapter);
4790 pci_disable_device(pdev); 4793 pci_disable_device(pdev);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index e23459cf3d0e..994401fd0664 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1531,7 +1531,12 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1531 */ 1531 */
1532 ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); 1532 ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
1533 if (ret_val) 1533 if (ret_val)
1534 break; 1534 /*
1535 * If the first read fails, another entity may have
1536 * ownership of the resources, wait and try again to
1537 * see if they have relinquished the resources yet.
1538 */
1539 udelay(usec_interval);
1535 ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); 1540 ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
1536 if (ret_val) 1541 if (ret_val)
1537 break; 1542 break;
@@ -2737,6 +2742,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
2737 if (hw->phy.type != e1000_phy_82578) 2742 if (hw->phy.type != e1000_phy_82578)
2738 goto out; 2743 goto out;
2739 2744
2745 /* Do not apply workaround if in PHY loopback bit 14 set */
2746 hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data);
2747 if (data & PHY_CONTROL_LB)
2748 goto out;
2749
2740 /* check if link is up and at 1Gbps */ 2750 /* check if link is up and at 1Gbps */
2741 ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data); 2751 ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
2742 if (ret_val) 2752 if (ret_val)
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index cc2ab6412c73..4f7003485348 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1784,7 +1784,7 @@ int __init init_module(void)
1784 printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); 1784 printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n");
1785 } 1785 }
1786 1786
1787 for (i = 0; io[i] != -1 && i < MAX_EEPRO; i++) { 1787 for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) {
1788 dev = alloc_etherdev(sizeof(struct eepro_local)); 1788 dev = alloc_etherdev(sizeof(struct eepro_local));
1789 if (!dev) 1789 if (!dev)
1790 break; 1790 break;
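The eepro loop fix swaps the two tests so the index is bounded by MAX_EEPRO before io[i] is read; with C's short-circuiting &&, the old order could read one element past the array when every slot was in use. A tiny sketch of the safe ordering, with a hypothetical per-board probe helper:

/* Sketch: bound the index first, then dereference (io[] has MAX_EEPRO slots). */
for (i = 0; i < MAX_EEPRO && io[i] != -1; i++)
	foo_probe_one(io[i]);	/* hypothetical probe of one board */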
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1686dca28748..1f016d66684a 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1474,13 +1474,13 @@ static void eexp_hw_init586(struct net_device *dev)
1474 outw(0x0000, ioaddr + 0x800c); 1474 outw(0x0000, ioaddr + 0x800c);
1475 outw(0x0000, ioaddr + 0x800e); 1475 outw(0x0000, ioaddr + 0x800e);
1476 1476
1477 for (i = 0; i < (sizeof(start_code)); i+=32) { 1477 for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
1478 int j; 1478 int j;
1479 outw(i, ioaddr + SM_PTR); 1479 outw(i, ioaddr + SM_PTR);
1480 for (j = 0; j < 16; j+=2) 1480 for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
1481 outw(start_code[(i+j)/2], 1481 outw(start_code[(i+j)/2],
1482 ioaddr+0x4000+j); 1482 ioaddr+0x4000+j);
1483 for (j = 0; j < 16; j+=2) 1483 for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
1484 outw(start_code[(i+j+16)/2], 1484 outw(start_code[(i+j+16)/2],
1485 ioaddr+0x8000+j); 1485 ioaddr+0x8000+j);
1486 } 1486 }
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 78952f8324e2..fa311a950996 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0101" 43#define DRV_VERSION "EHEA_0102"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 147c4b088fb3..977c3d358279 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1545{ 1545{
1546 int ret, i; 1546 int ret, i;
1547 1547
1548 if (pr->qp)
1549 netif_napi_del(&pr->napi);
1550
1548 ret = ehea_destroy_qp(pr->qp); 1551 ret = ehea_destroy_qp(pr->qp);
1549 1552
1550 if (!ret) { 1553 if (!ret) {
@@ -3080,7 +3083,9 @@ static const struct net_device_ops ehea_netdev_ops = {
3080 .ndo_poll_controller = ehea_netpoll, 3083 .ndo_poll_controller = ehea_netpoll,
3081#endif 3084#endif
3082 .ndo_get_stats = ehea_get_stats, 3085 .ndo_get_stats = ehea_get_stats,
3086 .ndo_change_mtu = eth_change_mtu,
3083 .ndo_set_mac_address = ehea_set_mac_addr, 3087 .ndo_set_mac_address = ehea_set_mac_addr,
3088 .ndo_validate_addr = eth_validate_addr,
3084 .ndo_set_multicast_list = ehea_set_multicast_list, 3089 .ndo_set_multicast_list = ehea_set_multicast_list,
3085 .ndo_change_mtu = ehea_change_mtu, 3090 .ndo_change_mtu = ehea_change_mtu,
3086 .ndo_vlan_rx_register = ehea_vlan_rx_register, 3091 .ndo_vlan_rx_register = ehea_vlan_rx_register,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index b60e27dfcfa7..88d7ebf31220 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -338,8 +338,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
338#ifndef MODULE 338#ifndef MODULE
339 static int printed_version; 339 static int printed_version;
340 if (!printed_version++) 340 if (!printed_version++)
341 printk (KERN_INFO "%s" KERN_INFO "%s", 341 printk(KERN_INFO "%s%s", version, version2);
342 version, version2);
343#endif 342#endif
344 343
345 card_idx++; 344 card_idx++;
@@ -1600,7 +1599,7 @@ static int __init epic_init (void)
1600{ 1599{
1601/* when a module, this is printed whether or not devices are found in probe */ 1600/* when a module, this is printed whether or not devices are found in probe */
1602#ifdef MODULE 1601#ifdef MODULE
1603 printk (KERN_INFO "%s" KERN_INFO "%s", 1602 printk (KERN_INFO "%s%s",
1604 version, version2); 1603 version, version2);
1605#endif 1604#endif
1606 1605
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 891be28a7d4f..160655d24581 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -584,7 +584,8 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
584 if (np->flags == HAS_MII_XCVR) { 584 if (np->flags == HAS_MII_XCVR) {
585 int phy, phy_idx = 0; 585 int phy, phy_idx = 0;
586 586
587 for (phy = 1; phy < 32 && phy_idx < 4; phy++) { 587 for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
588 phy++) {
588 int mii_status = mdio_read(dev, phy, 1); 589 int mii_status = mdio_read(dev, phy, 1);
589 590
590 if (mii_status != 0xffff && mii_status != 0x0000) { 591 if (mii_status != 0xffff && mii_status != 0x0000) {
@@ -1209,17 +1210,20 @@ static void fealnx_tx_timeout(struct net_device *dev)
1209 unsigned long flags; 1210 unsigned long flags;
1210 int i; 1211 int i;
1211 1212
1212 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," 1213 printk(KERN_WARNING
1213 " resetting...\n", dev->name, ioread32(ioaddr + ISR)); 1214 "%s: Transmit timed out, status %8.8x, resetting...\n",
1215 dev->name, ioread32(ioaddr + ISR));
1214 1216
1215 { 1217 {
1216 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); 1218 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
1217 for (i = 0; i < RX_RING_SIZE; i++) 1219 for (i = 0; i < RX_RING_SIZE; i++)
1218 printk(" %8.8x", (unsigned int) np->rx_ring[i].status); 1220 printk(KERN_CONT " %8.8x",
1219 printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring); 1221 (unsigned int) np->rx_ring[i].status);
1222 printk(KERN_CONT "\n");
1223 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
1220 for (i = 0; i < TX_RING_SIZE; i++) 1224 for (i = 0; i < TX_RING_SIZE; i++)
1221 printk(" %4.4x", np->tx_ring[i].status); 1225 printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
1222 printk("\n"); 1226 printk(KERN_CONT "\n");
1223 } 1227 }
1224 1228
1225 spin_lock_irqsave(&np->lock, flags); 1229 spin_lock_irqsave(&np->lock, flags);
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 0f19b743749b..d4b98074b1b7 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1642,6 +1642,7 @@ static const struct net_device_ops fec_netdev_ops = {
1642 .ndo_stop = fec_enet_close, 1642 .ndo_stop = fec_enet_close,
1643 .ndo_start_xmit = fec_enet_start_xmit, 1643 .ndo_start_xmit = fec_enet_start_xmit,
1644 .ndo_set_multicast_list = set_multicast_list, 1644 .ndo_set_multicast_list = set_multicast_list,
1645 .ndo_change_mtu = eth_change_mtu,
1645 .ndo_validate_addr = eth_validate_addr, 1646 .ndo_validate_addr = eth_validate_addr,
1646 .ndo_tx_timeout = fec_timeout, 1647 .ndo_tx_timeout = fec_timeout,
1647 .ndo_set_mac_address = fec_set_mac_address, 1648 .ndo_set_mac_address = fec_set_mac_address,
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 30b7dd671336..cc47f3f057c7 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -46,12 +46,12 @@
46 46
47#else 47#else
48 48
49#define FEC_ECNTRL; 0x000 /* Ethernet control reg */ 49#define FEC_ECNTRL 0x000 /* Ethernet control reg */
50#define FEC_IEVENT; 0x004 /* Interrupt even reg */ 50#define FEC_IEVENT 0x004 /* Interrupt even reg */
51#define FEC_IMASK; 0x008 /* Interrupt mask reg */ 51#define FEC_IMASK 0x008 /* Interrupt mask reg */
52#define FEC_IVEC; 0x00c /* Interrupt vec status reg */ 52#define FEC_IVEC 0x00c /* Interrupt vec status reg */
53#define FEC_R_DES_ACTIVE; 0x010 /* Receive descriptor reg */ 53#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
54#define FEC_X_DES_ACTIVE; 0x01c /* Transmit descriptor reg */ 54#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
55#define FEC_MII_DATA 0x040 /* MII manage frame reg */ 55#define FEC_MII_DATA 0x040 /* MII manage frame reg */
56#define FEC_MII_SPEED 0x044 /* MII speed control reg */ 56#define FEC_MII_SPEED 0x044 /* MII speed control reg */
57#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */ 57#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 1094d292630f..3b4e0766c7b2 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3514,11 +3514,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3514 nv_msi_workaround(np); 3514 nv_msi_workaround(np);
3515 3515
3516#ifdef CONFIG_FORCEDETH_NAPI 3516#ifdef CONFIG_FORCEDETH_NAPI
3517 napi_schedule(&np->napi); 3517 if (napi_schedule_prep(&np->napi)) {
3518 3518 /*
3519 /* Disable furthur irq's 3519 * Disable further irq's (msix not enabled with napi)
3520 (msix not enabled with napi) */ 3520 */
3521 writel(0, base + NvRegIrqMask); 3521 writel(0, base + NvRegIrqMask);
3522 __napi_schedule(&np->napi);
3523 }
3522 3524
3523#else 3525#else
3524 do 3526 do
@@ -3615,12 +3617,13 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3615 nv_msi_workaround(np); 3617 nv_msi_workaround(np);
3616 3618
3617#ifdef CONFIG_FORCEDETH_NAPI 3619#ifdef CONFIG_FORCEDETH_NAPI
3618 napi_schedule(&np->napi); 3620 if (napi_schedule_prep(&np->napi)) {
3619 3621 /*
3620 /* Disable furthur irq's 3622 * Disable further irq's (msix not enabled with napi)
3621 (msix not enabled with napi) */ 3623 */
3622 writel(0, base + NvRegIrqMask); 3624 writel(0, base + NvRegIrqMask);
3623 3625 __napi_schedule(&np->napi);
3626 }
3624#else 3627#else
3625 do 3628 do
3626 { 3629 {
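The forcedeth hunks guard the interrupt-mask write with napi_schedule_prep(): the device IRQ is masked only when this context actually wins the right to schedule the poll, so an already-scheduled or concurrent poll can never be left with interrupts disabled and nobody queued to re-enable them. A minimal sketch of the idiom, reusing the np, base and NvRegIrqMask names from the driver code above:

if (napi_schedule_prep(&np->napi)) {
	/* we own the poll now; mask chip interrupts until the poll
	 * routine finishes and re-enables them */
	writel(0, base + NvRegIrqMask);
	__napi_schedule(&np->napi);
}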
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index b892c3ad9a74..2bc2d2b20644 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -754,17 +754,16 @@ static int fs_init_phy(struct net_device *dev)
754 fep->oldlink = 0; 754 fep->oldlink = 0;
755 fep->oldspeed = 0; 755 fep->oldspeed = 0;
756 fep->oldduplex = -1; 756 fep->oldduplex = -1;
757 if(fep->fpi->phy_node) 757
758 phydev = of_phy_connect(dev, fep->fpi->phy_node, 758 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
759 &fs_adjust_link, 0, 759 PHY_INTERFACE_MODE_MII);
760 PHY_INTERFACE_MODE_MII); 760 if (!phydev) {
761 else { 761 phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
762 printk("No phy bus ID specified in BSP code\n"); 762 PHY_INTERFACE_MODE_MII);
763 return -EINVAL;
764 } 763 }
765 if (IS_ERR(phydev)) { 764 if (!phydev) {
766 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 765 dev_err(&dev->dev, "Could not attach to PHY\n");
767 return PTR_ERR(phydev); 766 return -ENODEV;
768 } 767 }
769 768
770 fep->phydev = phydev; 769 fep->phydev = phydev;
@@ -1005,6 +1004,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1005 goto out_free_fpi; 1004 goto out_free_fpi;
1006 } 1005 }
1007 1006
1007 SET_NETDEV_DEV(ndev, &ofdev->dev);
1008 dev_set_drvdata(&ofdev->dev, ndev); 1008 dev_set_drvdata(&ofdev->dev, ndev);
1009 1009
1010 fep = netdev_priv(ndev); 1010 fep = netdev_priv(ndev);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3af581303ca2..d167090248e2 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
188} 188}
189 189
190 190
191#ifdef CONFIG_GIANFAR 191#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
192static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) 192static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
193{ 193{
194 struct gfar __iomem *enet_regs; 194 struct gfar __iomem *enet_regs;
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
206#endif 206#endif
207 207
208 208
209#ifdef CONFIG_UCC_GETH 209#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
210static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) 210static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
211{ 211{
212 struct device_node *np = NULL; 212 struct device_node *np = NULL;
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
291 if (of_device_is_compatible(np, "fsl,gianfar-mdio") || 291 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
292 of_device_is_compatible(np, "fsl,gianfar-tbi") || 292 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
293 of_device_is_compatible(np, "gianfar")) { 293 of_device_is_compatible(np, "gianfar")) {
294#ifdef CONFIG_GIANFAR 294#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
295 tbipa = get_gfar_tbipa(regs); 295 tbipa = get_gfar_tbipa(regs);
296#else 296#else
297 err = -ENODEV; 297 err = -ENODEV;
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
299#endif 299#endif
300 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || 300 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
301 of_device_is_compatible(np, "ucc_geth_phy")) { 301 of_device_is_compatible(np, "ucc_geth_phy")) {
302#ifdef CONFIG_UCC_GETH 302#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
303 u32 id; 303 u32 id;
304 static u32 mii_mng_master; 304 static u32 mii_mng_master;
305 305
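The fsl_pq_mdio hunks widen each #ifdef so the helper code is also built when the other driver is configured as a module: CONFIG_GIANFAR=m defines CONFIG_GIANFAR_MODULE rather than CONFIG_GIANFAR. A short sketch of the convention, with FOO as a placeholder symbol; newer kernels wrap the same test in IS_ENABLED(CONFIG_FOO).

/* Sketch: compile the block for both built-in (=y) and modular (=m) FOO. */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
	/* code that depends on the FOO driver being available */
#endif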
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4ae1d259fced..f8ffcbf0bc39 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -156,6 +156,8 @@ static const struct net_device_ops gfar_netdev_ops = {
156 .ndo_tx_timeout = gfar_timeout, 156 .ndo_tx_timeout = gfar_timeout,
157 .ndo_do_ioctl = gfar_ioctl, 157 .ndo_do_ioctl = gfar_ioctl,
158 .ndo_vlan_rx_register = gfar_vlan_rx_register, 158 .ndo_vlan_rx_register = gfar_vlan_rx_register,
159 .ndo_set_mac_address = eth_mac_addr,
160 .ndo_validate_addr = eth_validate_addr,
159#ifdef CONFIG_NET_POLL_CONTROLLER 161#ifdef CONFIG_NET_POLL_CONTROLLER
160 .ndo_poll_controller = gfar_netpoll, 162 .ndo_poll_controller = gfar_netpoll,
161#endif 163#endif
@@ -262,15 +264,6 @@ static int gfar_of_init(struct net_device *dev)
262 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; 264 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
263 265
264 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 266 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
265 if (!priv->phy_node) {
266 u32 *fixed_link;
267
268 fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
269 if (!fixed_link) {
270 err = -ENODEV;
271 goto err_out;
272 }
273 }
274 267
275 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 268 /* Find the TBI PHY. If it's not there, we don't support SGMII */
276 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 269 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
@@ -657,13 +650,14 @@ static int init_phy(struct net_device *dev)
657 650
658 interface = gfar_get_interface(dev); 651 interface = gfar_get_interface(dev);
659 652
660 if (priv->phy_node) { 653 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
661 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 654 interface);
662 0, interface); 655 if (!priv->phydev)
663 if (!priv->phydev) { 656 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
664 dev_err(&dev->dev, "error: Could not attach to PHY\n"); 657 interface);
665 return -ENODEV; 658 if (!priv->phydev) {
666 } 659 dev_err(&dev->dev, "could not attach to PHY\n");
660 return -ENODEV;
667 } 661 }
668 662
669 if (interface == PHY_INTERFACE_MODE_SGMII) 663 if (interface == PHY_INTERFACE_MODE_SGMII)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index dbf06e9313cc..2234118eedbb 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -366,9 +366,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
366 return -EINVAL; 366 return -EINVAL;
367 } 367 }
368 368
369 priv->rxic = mk_ic_value( 369 priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs), 370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
371 cvals->rx_max_coalesced_frames);
372 371
373 /* Set up tx coalescing */ 372 /* Set up tx coalescing */
374 if ((cvals->tx_coalesce_usecs == 0) || 373 if ((cvals->tx_coalesce_usecs == 0) ||
@@ -390,9 +389,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
390 return -EINVAL; 389 return -EINVAL;
391 } 390 }
392 391
393 priv->txic = mk_ic_value( 392 priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
394 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs), 393 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
395 cvals->tx_max_coalesced_frames);
396 394
397 gfar_write(&priv->regs->rxic, 0); 395 gfar_write(&priv->regs->rxic, 0);
398 if (priv->rxcoalescing) 396 if (priv->rxcoalescing)
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 9d5b62cb30f7..d62378cbc149 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -173,8 +173,8 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
173 173
174static const char version[] __devinitconst = 174static const char version[] __devinitconst =
175KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 175KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
176KERN_INFO " Some modifications by Eric kasten <kasten@nscl.msu.edu>\n" 176" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
177KERN_INFO " Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n"; 177" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
178 178
179 179
180/* IP_MF appears to be only defined in <netinet/ip.h>, however, 180/* IP_MF appears to be only defined in <netinet/ip.h>, however,
@@ -1080,11 +1080,14 @@ static void hamachi_tx_timeout(struct net_device *dev)
1080 { 1080 {
1081 printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring); 1081 printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
1082 for (i = 0; i < RX_RING_SIZE; i++) 1082 for (i = 0; i < RX_RING_SIZE; i++)
1083 printk(" %8.8x", le32_to_cpu(hmp->rx_ring[i].status_n_length)); 1083 printk(KERN_CONT " %8.8x",
1084 printk("\n"KERN_DEBUG" Tx ring %p: ", hmp->tx_ring); 1084 le32_to_cpu(hmp->rx_ring[i].status_n_length));
1085 printk(KERN_CONT "\n");
1086 printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
1085 for (i = 0; i < TX_RING_SIZE; i++) 1087 for (i = 0; i < TX_RING_SIZE; i++)
1086 printk(" %4.4x", le32_to_cpu(hmp->tx_ring[i].status_n_length)); 1088 printk(KERN_CONT " %4.4x",
1087 printk("\n"); 1089 le32_to_cpu(hmp->tx_ring[i].status_n_length));
1090 printk(KERN_CONT "\n");
1088 } 1091 }
1089 1092
1090 /* Reinit the hardware and make sure the Rx and Tx processes 1093 /* Reinit the hardware and make sure the Rx and Tx processes
@@ -1753,13 +1756,13 @@ static int hamachi_close(struct net_device *dev)
1753 1756
1754#ifdef __i386__ 1757#ifdef __i386__
1755 if (hamachi_debug > 2) { 1758 if (hamachi_debug > 2) {
1756 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", 1759 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1757 (int)hmp->tx_ring_dma); 1760 (int)hmp->tx_ring_dma);
1758 for (i = 0; i < TX_RING_SIZE; i++) 1761 for (i = 0; i < TX_RING_SIZE; i++)
1759 printk(" %c #%d desc. %8.8x %8.8x.\n", 1762 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n",
1760 readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ', 1763 readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
1761 i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr); 1764 i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
1762 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", 1765 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1763 (int)hmp->rx_ring_dma); 1766 (int)hmp->rx_ring_dma);
1764 for (i = 0; i < RX_RING_SIZE; i++) { 1767 for (i = 0; i < RX_RING_SIZE; i++) {
1765 printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n", 1768 printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n",
@@ -1770,7 +1773,7 @@ static int hamachi_close(struct net_device *dev)
1770 u16 *addr = (u16 *) 1773 u16 *addr = (u16 *)
1771 hmp->rx_skbuff[i]->data; 1774 hmp->rx_skbuff[i]->data;
1772 int j; 1775 int j;
1773 1776 printk(KERN_DEBUG "Addr: ");
1774 for (j = 0; j < 0x50; j++) 1777 for (j = 0; j < 0x50; j++)
1775 printk(" %4.4x", addr[j]); 1778 printk(" %4.4x", addr[j]);
1776 printk("\n"); 1779 printk("\n");
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 155160052c8b..981ab530e9ac 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -3,7 +3,7 @@
3 * devices like TTY. It interfaces between a raw TTY and the 3 * devices like TTY. It interfaces between a raw TTY and the
4 * kernel's AX.25 protocol layers. 4 * kernel's AX.25 protocol layers.
5 * 5 *
6 * Authors: Andreas Könsgen <ajk@iehk.rwth-aachen.de> 6 * Authors: Andreas Könsgen <ajk@comnets.uni-bremen.de>
7 * Ralf Baechle DL5RB <ralf@linux-mips.org> 7 * Ralf Baechle DL5RB <ralf@linux-mips.org>
8 * 8 *
9 * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by 9 * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 5e4b7afd0683..352703255bba 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -68,7 +68,7 @@ static const char paranoia_str[] = KERN_ERR
68 68
69static const char bc_drvname[] = "baycom_epp"; 69static const char bc_drvname[] = "baycom_epp";
70static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n" 70static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n"
71KERN_INFO "baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n"; 71"baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n";
72 72
73/* --------------------------------------------------------------------- */ 73/* --------------------------------------------------------------------- */
74 74
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 2e6fc4dc74b1..5f5af9a606f8 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -102,7 +102,7 @@
102 102
103static const char bc_drvname[] = "baycom_par"; 103static const char bc_drvname[] = "baycom_par";
104static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 104static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
105KERN_INFO "baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n"; 105"baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n";
106 106
107/* --------------------------------------------------------------------- */ 107/* --------------------------------------------------------------------- */
108 108
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index b6a816e60c0f..aa4488e871b2 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -91,7 +91,7 @@
91 91
92static const char bc_drvname[] = "baycom_ser_fdx"; 92static const char bc_drvname[] = "baycom_ser_fdx";
93static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 93static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
94KERN_INFO "baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; 94"baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
95 95
96/* --------------------------------------------------------------------- */ 96/* --------------------------------------------------------------------- */
97 97
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 3bcc57acbe6d..88c593596020 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -79,7 +79,7 @@
79 79
80static const char bc_drvname[] = "baycom_ser_hdx"; 80static const char bc_drvname[] = "baycom_ser_hdx";
81static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 81static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
82KERN_INFO "baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; 82"baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
83 83
84/* --------------------------------------------------------------------- */ 84/* --------------------------------------------------------------------- */
85 85
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 1d5379de6900..8d76cb89dbd6 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -188,11 +188,12 @@ void rgmii_put_mdio(struct of_device *ofdev, int input)
188void rgmii_detach(struct of_device *ofdev, int input) 188void rgmii_detach(struct of_device *ofdev, int input)
189{ 189{
190 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 190 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
191 struct rgmii_regs __iomem *p = dev->base; 191 struct rgmii_regs __iomem *p;
192
193 mutex_lock(&dev->lock);
194 192
195 BUG_ON(!dev || dev->users == 0); 193 BUG_ON(!dev || dev->users == 0);
194 p = dev->base;
195
196 mutex_lock(&dev->lock);
196 197
197 RGMII_DBG(dev, "detach(%d)" NL, input); 198 RGMII_DBG(dev, "detach(%d)" NL, input);
198 199
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index efd9be214885..ac28dd5a4fd1 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -190,6 +190,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
190 phy->ops.write_reg = igb_write_phy_reg_igp; 190 phy->ops.write_reg = igb_write_phy_reg_igp;
191 } 191 }
192 192
193 /* set lan id */
194 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
195 E1000_STATUS_FUNC_SHIFT;
196
193 /* Set phy->phy_addr and phy->id. */ 197 /* Set phy->phy_addr and phy->id. */
194 ret_val = igb_get_phy_id_82575(hw); 198 ret_val = igb_get_phy_id_82575(hw);
195 if (ret_val) 199 if (ret_val)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ea17319624aa..adb09d32625d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -127,14 +127,48 @@ static void igb_restore_vlan(struct igb_adapter *);
127static void igb_ping_all_vfs(struct igb_adapter *); 127static void igb_ping_all_vfs(struct igb_adapter *);
128static void igb_msg_task(struct igb_adapter *); 128static void igb_msg_task(struct igb_adapter *);
129static int igb_rcv_msg_from_vf(struct igb_adapter *, u32); 129static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
130static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
131static void igb_set_mc_list_pools(struct igb_adapter *, int, u16); 130static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
132static void igb_vmm_control(struct igb_adapter *); 131static void igb_vmm_control(struct igb_adapter *);
133static inline void igb_set_vmolr(struct e1000_hw *, int);
134static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
135static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); 132static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
136static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 133static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
137 134
135static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
136{
137 u32 reg_data;
138
139 reg_data = rd32(E1000_VMOLR(vfn));
140 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
141 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
142 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
143 E1000_VMOLR_AUPE | /* Accept untagged packets */
144 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
145 wr32(E1000_VMOLR(vfn), reg_data);
146}
147
148static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
149 int vfn)
150{
151 struct e1000_hw *hw = &adapter->hw;
152 u32 vmolr;
153
154 vmolr = rd32(E1000_VMOLR(vfn));
155 vmolr &= ~E1000_VMOLR_RLPML_MASK;
156 vmolr |= size | E1000_VMOLR_LPE;
157 wr32(E1000_VMOLR(vfn), vmolr);
158
159 return 0;
160}
161
162static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
163{
164 u32 reg_data;
165
166 reg_data = rd32(E1000_RAH(entry));
167 reg_data &= ~E1000_RAH_POOL_MASK;
168 reg_data |= E1000_RAH_POOL_1 << pool;;
169 wr32(E1000_RAH(entry), reg_data);
170}
171
138#ifdef CONFIG_PM 172#ifdef CONFIG_PM
139static int igb_suspend(struct pci_dev *, pm_message_t); 173static int igb_suspend(struct pci_dev *, pm_message_t);
140static int igb_resume(struct pci_dev *); 174static int igb_resume(struct pci_dev *);
@@ -4549,11 +4583,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4549 cleaned = true; 4583 cleaned = true;
4550 cleaned_count++; 4584 cleaned_count++;
4551 4585
4586 /* this is the fast path for the non-packet split case */
4552 if (!adapter->rx_ps_hdr_size) { 4587 if (!adapter->rx_ps_hdr_size) {
4553 pci_unmap_single(pdev, buffer_info->dma, 4588 pci_unmap_single(pdev, buffer_info->dma,
4554 adapter->rx_buffer_len + 4589 adapter->rx_buffer_len,
4555 NET_IP_ALIGN,
4556 PCI_DMA_FROMDEVICE); 4590 PCI_DMA_FROMDEVICE);
4591 buffer_info->dma = 0;
4557 skb_put(skb, length); 4592 skb_put(skb, length);
4558 goto send_up; 4593 goto send_up;
4559 } 4594 }
@@ -4570,8 +4605,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4570 4605
4571 if (!skb_shinfo(skb)->nr_frags) { 4606 if (!skb_shinfo(skb)->nr_frags) {
4572 pci_unmap_single(pdev, buffer_info->dma, 4607 pci_unmap_single(pdev, buffer_info->dma,
4573 adapter->rx_ps_hdr_size + NET_IP_ALIGN, 4608 adapter->rx_ps_hdr_size,
4574 PCI_DMA_FROMDEVICE); 4609 PCI_DMA_FROMDEVICE);
4610 buffer_info->dma = 0;
4575 skb_put(skb, hlen); 4611 skb_put(skb, hlen);
4576 } 4612 }
4577 4613
@@ -4713,7 +4749,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4713 bufsz = adapter->rx_ps_hdr_size; 4749 bufsz = adapter->rx_ps_hdr_size;
4714 else 4750 else
4715 bufsz = adapter->rx_buffer_len; 4751 bufsz = adapter->rx_buffer_len;
4716 bufsz += NET_IP_ALIGN;
4717 4752
4718 while (cleaned_count--) { 4753 while (cleaned_count--) {
4719 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 4754 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4772,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4737 } 4772 }
4738 4773
4739 if (!buffer_info->skb) { 4774 if (!buffer_info->skb) {
4740 skb = netdev_alloc_skb(netdev, bufsz); 4775 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
4741 if (!skb) { 4776 if (!skb) {
4742 adapter->alloc_rx_buff_failed++; 4777 adapter->alloc_rx_buff_failed++;
4743 goto no_buffers; 4778 goto no_buffers;
@@ -5338,6 +5373,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
5338 5373
5339 netif_device_detach(netdev); 5374 netif_device_detach(netdev);
5340 5375
5376 if (state == pci_channel_io_perm_failure)
5377 return PCI_ERS_RESULT_DISCONNECT;
5378
5341 if (netif_running(netdev)) 5379 if (netif_running(netdev))
5342 igb_down(adapter); 5380 igb_down(adapter);
5343 pci_disable_device(pdev); 5381 pci_disable_device(pdev);
@@ -5414,43 +5452,6 @@ static void igb_io_resume(struct pci_dev *pdev)
5414 igb_get_hw_control(adapter); 5452 igb_get_hw_control(adapter);
5415} 5453}
5416 5454
5417static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
5418{
5419 u32 reg_data;
5420
5421 reg_data = rd32(E1000_VMOLR(vfn));
5422 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
5423 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
5424 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
5425 E1000_VMOLR_AUPE | /* Accept untagged packets */
5426 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
5427 wr32(E1000_VMOLR(vfn), reg_data);
5428}
5429
5430static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
5431 int vfn)
5432{
5433 struct e1000_hw *hw = &adapter->hw;
5434 u32 vmolr;
5435
5436 vmolr = rd32(E1000_VMOLR(vfn));
5437 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5438 vmolr |= size | E1000_VMOLR_LPE;
5439 wr32(E1000_VMOLR(vfn), vmolr);
5440
5441 return 0;
5442}
5443
5444static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
5445{
5446 u32 reg_data;
5447
5448 reg_data = rd32(E1000_RAH(entry));
5449 reg_data &= ~E1000_RAH_POOL_MASK;
5450 reg_data |= E1000_RAH_POOL_1 << pool;;
5451 wr32(E1000_RAH(entry), reg_data);
5452}
5453
5454static void igb_set_mc_list_pools(struct igb_adapter *adapter, 5455static void igb_set_mc_list_pools(struct igb_adapter *adapter,
5455 int entry_count, u16 total_rar_filters) 5456 int entry_count, u16 total_rar_filters)
5456{ 5457{
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 2a4faf9ade69..a9a61efa964c 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -274,6 +274,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
274 274
275 err = mbx->ops.read_posted(hw, msgbuf, 2); 275 err = mbx->ops.read_posted(hw, msgbuf, 2);
276 276
277 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
278
277 /* if nacked the vlan was rejected */ 279 /* if nacked the vlan was rejected */
278 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) 280 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
279 err = -E1000_ERR_MAC_INIT; 281 err = -E1000_ERR_MAC_INIT;
@@ -317,6 +319,8 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
317 if (!ret_val) 319 if (!ret_val)
318 ret_val = mbx->ops.read_posted(hw, msgbuf, 3); 320 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
319 321
322 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
323
320 /* if nacked the address was rejected, use "perm_addr" */ 324 /* if nacked the address was rejected, use "perm_addr" */
321 if (!ret_val && 325 if (!ret_val &&
322 (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) 326 (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
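
Both igbvf hunks above mask a status bit out of the first mailbox word before testing for a NACK; a reply carrying extra flag bits would otherwise never compare equal to the bare NACK value. A reduced sketch of that check follows; the bit values are made up for illustration, the real ones live in the igbvf headers.

#include <linux/types.h>

#define EX_MSGTYPE_CTS    0x20000000u   /* illustrative value */
#define EX_MSGTYPE_NACK   0x40000000u   /* illustrative value */
#define EX_SET_VLAN       0x00000002u   /* illustrative value */

static bool ex_vlan_rejected(u32 reply)
{
        reply &= ~EX_MSGTYPE_CTS;       /* CTS may be OR'd into any reply */
        return reply == (EX_SET_VLAN | EX_MSGTYPE_NACK);
}
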
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f3eed6a8fba5..911c082cee5a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
677 return 0; 677 return 0;
678} 678}
679 679
680static const struct net_device_ops bfin_sir_ndo = {
681 .ndo_open = bfin_sir_open,
682 .ndo_stop = bfin_sir_stop,
683 .ndo_start_xmit = bfin_sir_hard_xmit,
684 .ndo_do_ioctl = bfin_sir_ioctl,
685 .ndo_get_stats = bfin_sir_stats,
686};
687
680static int __devinit bfin_sir_probe(struct platform_device *pdev) 688static int __devinit bfin_sir_probe(struct platform_device *pdev)
681{ 689{
682 struct net_device *dev; 690 struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
718 if (err) 726 if (err)
719 goto err_mem_3; 727 goto err_mem_3;
720 728
721 dev->hard_start_xmit = bfin_sir_hard_xmit; 729 dev->netdev_ops = &bfin_sir_ndo;
722 dev->open = bfin_sir_open; 730 dev->irq = sir_port->irq;
723 dev->stop = bfin_sir_stop;
724 dev->do_ioctl = bfin_sir_ioctl;
725 dev->get_stats = bfin_sir_stats;
726 dev->irq = sir_port->irq;
727 731
728 irda_init_max_qos_capabilies(&self->qos); 732 irda_init_max_qos_capabilies(&self->qos);
729 733
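
The bfin_sir hunk above is the standard conversion from the old per-field net_device callbacks to a single net_device_ops table defined once and hooked up in probe. A stripped-down sketch of the same shape, with placeholder callbacks instead of the driver's own:

#include <linux/netdevice.h>

static int ex_open(struct net_device *dev)
{
        return 0;
}

static int ex_stop(struct net_device *dev)
{
        return 0;
}

static int ex_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops ex_ndo = {
        .ndo_open       = ex_open,
        .ndo_stop       = ex_stop,
        .ndo_start_xmit = ex_xmit,
};

/* In probe(): dev->netdev_ops = &ex_ndo; replaces the removed
 * dev->open / dev->stop / dev->hard_start_xmit assignments. */
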
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index d53aa9582137..20f9bc626688 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -31,7 +31,6 @@
31#include <linux/tty.h> 31#include <linux/tty.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <linux/smp_lock.h>
35#include <linux/delay.h> 34#include <linux/delay.h>
36#include <linux/mutex.h> 35#include <linux/mutex.h>
37 36
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index 73585fd8f29f..d12377b84358 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -430,7 +430,8 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
430 * hardware interrupt handler. Queue flow control is 430 * hardware interrupt handler. Queue flow control is
431 * thus managed under this lock as well. 431 * thus managed under this lock as well.
432 */ 432 */
433 spin_lock_irq(&np->lock); 433 unsigned long flags;
434 spin_lock_irqsave(&np->lock, flags);
434 435
435 add_to_tx_ring(np, skb, length); 436 add_to_tx_ring(np, skb, length);
436 dev->trans_start = jiffies; 437 dev->trans_start = jiffies;
@@ -446,7 +447,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
446 * is when the transmit statistics are updated. 447 * is when the transmit statistics are updated.
447 */ 448 */
448 449
449 spin_unlock_irq(&np->lock); 450 spin_unlock_irqrestore(&np->lock, flags);
450#else 451#else
451 /* This is the case for older hardware which takes 452 /* This is the case for older hardware which takes
452 * a single transmit buffer at a time, and it is 453 * a single transmit buffer at a time, and it is
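
The isa-skeleton change above moves to spin_lock_irqsave() because the transmit path cannot assume local interrupts are enabled on entry; the irqsave form records the caller's interrupt state and spin_unlock_irqrestore() puts exactly that state back. A minimal sketch of the pattern, with a made-up lock and an empty critical section:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_tx_lock);

static void ex_queue_tx(void)
{
        unsigned long flags;

        /* Save whatever IRQ state the caller had, then disable local IRQs. */
        spin_lock_irqsave(&ex_tx_lock, flags);

        /* ... touch the tx ring and flow control here ... */

        /* Restore the saved state: IRQs stay off if they were off on entry. */
        spin_unlock_irqrestore(&ex_tx_lock, flags);
}
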
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index cd22323cfd22..e11d83d5852b 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -96,6 +96,8 @@
96#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 96#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
97#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 97#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
98 98
99#define IXGBE_MAX_RSC_INT_RATE 162760
100
99/* wrapper around a pointer to a socket buffer, 101/* wrapper around a pointer to a socket buffer,
100 * so a DMA handle can be stored along with the buffer */ 102 * so a DMA handle can be stored along with the buffer */
101struct ixgbe_tx_buffer { 103struct ixgbe_tx_buffer {
@@ -327,6 +329,7 @@ struct ixgbe_adapter {
327#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) 329#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
328#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) 330#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
329#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) 331#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
332#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
330#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) 333#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
331 334
332 u32 flags2; 335 u32 flags2;
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index b9923047ce11..522c03bc1dad 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -50,6 +50,51 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
50 u8 *eeprom_data); 50 u8 *eeprom_data);
51 51
52/** 52/**
53 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
54 * @hw: pointer to the HW structure
55 *
56 * The defaults for 82598 should be in the range of 50us to 50ms,
57 * however the hardware default for these parts is 500us to 1ms which is less
58 * than the 10ms recommended by the pci-e spec. To address this we need to
59 * increase the value to either 10ms to 250ms for capability version 1 config,
60 * or 16ms to 55ms for version 2.
61 **/
62void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
63{
64 struct ixgbe_adapter *adapter = hw->back;
65 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
66 u16 pcie_devctl2;
67
68 /* only take action if timeout value is defaulted to 0 */
69 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
70 goto out;
71
72 /*
 73 * if capabilities version is type 1 we can write the
74 * timeout of 10ms to 250ms through the GCR register
75 */
76 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
77 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
78 goto out;
79 }
80
81 /*
82 * for version 2 capabilities we need to write the config space
83 * directly in order to set the completion timeout value for
84 * 16ms to 55ms
85 */
86 pci_read_config_word(adapter->pdev,
87 IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
88 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
89 pci_write_config_word(adapter->pdev,
90 IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
91out:
92 /* disable completion timeout resend */
93 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
94 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
95}
96
97/**
53 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count 98 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
54 * @hw: pointer to hardware structure 99 * @hw: pointer to hardware structure
55 * 100 *
@@ -153,6 +198,26 @@ out:
153} 198}
154 199
155/** 200/**
201 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
202 * @hw: pointer to hardware structure
203 *
204 * Starts the hardware using the generic start_hw function.
205 * Then set pcie completion timeout
206 **/
207s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
208{
209 s32 ret_val = 0;
210
211 ret_val = ixgbe_start_hw_generic(hw);
212
213 /* set the completion timeout for interface */
214 if (ret_val == 0)
215 ixgbe_set_pcie_completion_timeout(hw);
216
217 return ret_val;
218}
219
220/**
156 * ixgbe_get_link_capabilities_82598 - Determines link capabilities 221 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
157 * @hw: pointer to hardware structure 222 * @hw: pointer to hardware structure
158 * @speed: pointer to link speed 223 * @speed: pointer to link speed
@@ -1085,7 +1150,7 @@ out:
1085static struct ixgbe_mac_operations mac_ops_82598 = { 1150static struct ixgbe_mac_operations mac_ops_82598 = {
1086 .init_hw = &ixgbe_init_hw_generic, 1151 .init_hw = &ixgbe_init_hw_generic,
1087 .reset_hw = &ixgbe_reset_hw_82598, 1152 .reset_hw = &ixgbe_reset_hw_82598,
1088 .start_hw = &ixgbe_start_hw_generic, 1153 .start_hw = &ixgbe_start_hw_82598,
1089 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 1154 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
1090 .get_media_type = &ixgbe_get_media_type_82598, 1155 .get_media_type = &ixgbe_get_media_type_82598,
1091 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, 1156 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index d56890f5c9d5..1c7265732900 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -106,8 +106,6 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
106{ 106{
107 struct ixgbe_adapter *adapter = netdev_priv(netdev); 107 struct ixgbe_adapter *adapter = netdev_priv(netdev);
108 108
109 DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
110
111 return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); 109 return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
112} 110}
113 111
@@ -116,8 +114,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
116 u8 err = 0; 114 u8 err = 0;
117 struct ixgbe_adapter *adapter = netdev_priv(netdev); 115 struct ixgbe_adapter *adapter = netdev_priv(netdev);
118 116
119 DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
120
121 if (state > 0) { 117 if (state > 0) {
122 /* Turn on DCB */ 118 /* Turn on DCB */
123 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 119 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
@@ -138,7 +134,23 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
138 adapter->hw.fc.requested_mode = ixgbe_fc_none; 134 adapter->hw.fc.requested_mode = ixgbe_fc_none;
139 } 135 }
140 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 136 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
137 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
138 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
139 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
140 }
141 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 141 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
142#ifdef IXGBE_FCOE
143 /* Turn on FCoE offload */
144 if ((adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) &&
145 (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))) {
146 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
147 adapter->ring_feature[RING_F_FCOE].indices =
148 IXGBE_FCRETA_SIZE;
149 netdev->features |= NETIF_F_FCOE_CRC;
150 netdev->features |= NETIF_F_FSO;
151 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
152 }
153#endif /* IXGBE_FCOE */
142 ixgbe_init_interrupt_scheme(adapter); 154 ixgbe_init_interrupt_scheme(adapter);
143 if (netif_running(netdev)) 155 if (netif_running(netdev))
144 netdev->netdev_ops->ndo_open(netdev); 156 netdev->netdev_ops->ndo_open(netdev);
@@ -154,6 +166,20 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
154 adapter->dcb_cfg.pfc_mode_enable = false; 166 adapter->dcb_cfg.pfc_mode_enable = false;
155 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 167 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
156 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 168 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
169 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
170 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
171
172#ifdef IXGBE_FCOE
173 /* Turn off FCoE offload */
174 if (adapter->flags & (IXGBE_FLAG_FCOE_CAPABLE |
175 IXGBE_FLAG_FCOE_ENABLED)) {
176 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
177 adapter->ring_feature[RING_F_FCOE].indices = 0;
178 netdev->features &= ~NETIF_F_FCOE_CRC;
179 netdev->features &= ~NETIF_F_FSO;
180 netdev->fcoe_ddp_xid = 0;
181 }
182#endif /* IXGBE_FCOE */
157 ixgbe_init_interrupt_scheme(adapter); 183 ixgbe_init_interrupt_scheme(adapter);
158 if (netif_running(netdev)) 184 if (netif_running(netdev))
159 netdev->netdev_ops->ndo_open(netdev); 185 netdev->netdev_ops->ndo_open(netdev);
@@ -169,6 +195,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
169 struct ixgbe_adapter *adapter = netdev_priv(netdev); 195 struct ixgbe_adapter *adapter = netdev_priv(netdev);
170 int i, j; 196 int i, j;
171 197
198 memset(perm_addr, 0xff, MAX_ADDR_LEN);
199
172 for (i = 0; i < netdev->addr_len; i++) 200 for (i = 0; i < netdev->addr_len; i++)
173 perm_addr[i] = adapter->hw.mac.perm_addr[i]; 201 perm_addr[i] = adapter->hw.mac.perm_addr[i];
174 202
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 86f4f3e36f27..79144e950a34 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
139 ecmd->autoneg = AUTONEG_ENABLE; 139 ecmd->autoneg = AUTONEG_ENABLE;
140 ecmd->transceiver = XCVR_EXTERNAL; 140 ecmd->transceiver = XCVR_EXTERNAL;
141 if ((hw->phy.media_type == ixgbe_media_type_copper) || 141 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
142 (hw->mac.type == ixgbe_mac_82599EB)) { 142 (hw->phy.multispeed_fiber)) {
143 ecmd->supported |= (SUPPORTED_1000baseT_Full | 143 ecmd->supported |= (SUPPORTED_1000baseT_Full |
144 SUPPORTED_Autoneg); 144 SUPPORTED_Autoneg);
145 145
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
217 s32 err = 0; 217 s32 err = 0;
218 218
219 if ((hw->phy.media_type == ixgbe_media_type_copper) || 219 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
220 (hw->mac.type == ixgbe_mac_82599EB)) { 220 (hw->phy.multispeed_fiber)) {
221 /* 10000/copper and 1000/copper must autoneg 221 /* 10000/copper and 1000/copper must autoneg
222 * this function does not support any duplex forcing, but can 222 * this function does not support any duplex forcing, but can
223 * limit the advertising of the adapter to only 10000 or 1000 */ 223 * limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
245 } else { 245 } else {
246 /* in this case we currently only support 10Gb/FULL */ 246 /* in this case we currently only support 10Gb/FULL */
247 if ((ecmd->autoneg == AUTONEG_ENABLE) || 247 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
248 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
248 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 249 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
249 return -EINVAL; 250 return -EINVAL;
250 } 251 }
@@ -1829,7 +1830,6 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1829 break; 1830 break;
1830 default: 1831 default:
1831 wol->supported = 0; 1832 wol->supported = 0;
1832 retval = 0;
1833 } 1833 }
1834 1834
1835 return retval; 1835 return retval;
@@ -1975,7 +1975,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1975 * any other value means disable eitr, which is best 1975 * any other value means disable eitr, which is best
1976 * served by setting the interrupt rate very high 1976 * served by setting the interrupt rate very high
1977 */ 1977 */
1978 adapter->eitr_param = IXGBE_MAX_INT_RATE; 1978 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
1979 adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE;
1980 else
1981 adapter->eitr_param = IXGBE_MAX_INT_RATE;
1979 adapter->itr_setting = 0; 1982 adapter->itr_setting = 0;
1980 } 1983 }
1981 1984
@@ -1999,13 +2002,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
1999 2002
2000 ethtool_op_set_flags(netdev, data); 2003 ethtool_op_set_flags(netdev, data);
2001 2004
2002 if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)) 2005 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2003 return 0; 2006 return 0;
2004 2007
2005 /* if state changes we need to update adapter->flags and reset */ 2008 /* if state changes we need to update adapter->flags and reset */
2006 if ((!!(data & ETH_FLAG_LRO)) != 2009 if ((!!(data & ETH_FLAG_LRO)) !=
2007 (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) { 2010 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2008 adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED; 2011 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2009 if (netif_running(netdev)) 2012 if (netif_running(netdev))
2010 ixgbe_reinit_locked(adapter); 2013 ixgbe_reinit_locked(adapter);
2011 else 2014 else
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e756e220db32..110c65ab5cb5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -34,6 +34,7 @@
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/ip.h> 35#include <linux/ip.h>
36#include <linux/tcp.h> 36#include <linux/tcp.h>
37#include <linux/pkt_sched.h>
37#include <linux/ipv6.h> 38#include <linux/ipv6.h>
38#include <net/checksum.h> 39#include <net/checksum.h>
39#include <net/ip6_checksum.h> 40#include <net/ip6_checksum.h>
@@ -510,8 +511,11 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
510 * @skb: skb currently being received and modified 511 * @skb: skb currently being received and modified
511 **/ 512 **/
512static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, 513static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
513 u32 status_err, struct sk_buff *skb) 514 union ixgbe_adv_rx_desc *rx_desc,
515 struct sk_buff *skb)
514{ 516{
517 u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
518
515 skb->ip_summed = CHECKSUM_NONE; 519 skb->ip_summed = CHECKSUM_NONE;
516 520
517 /* Rx csum disabled */ 521 /* Rx csum disabled */
@@ -529,6 +533,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
529 return; 533 return;
530 534
531 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 535 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
536 u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
537
538 /*
539 * 82599 errata, UDP frames with a 0 checksum can be marked as
540 * checksum errors.
541 */
542 if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
543 (adapter->hw.mac.type == ixgbe_mac_82599EB))
544 return;
545
532 adapter->hw_csum_rx_error++; 546 adapter->hw_csum_rx_error++;
533 return; 547 return;
534 } 548 }
@@ -563,7 +577,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
563 union ixgbe_adv_rx_desc *rx_desc; 577 union ixgbe_adv_rx_desc *rx_desc;
564 struct ixgbe_rx_buffer *bi; 578 struct ixgbe_rx_buffer *bi;
565 unsigned int i; 579 unsigned int i;
566 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
567 580
568 i = rx_ring->next_to_use; 581 i = rx_ring->next_to_use;
569 bi = &rx_ring->rx_buffer_info[i]; 582 bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +606,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
593 606
594 if (!bi->skb) { 607 if (!bi->skb) {
595 struct sk_buff *skb; 608 struct sk_buff *skb;
596 skb = netdev_alloc_skb(adapter->netdev, bufsz); 609 skb = netdev_alloc_skb(adapter->netdev,
610 (rx_ring->rx_buf_len +
611 NET_IP_ALIGN));
597 612
598 if (!skb) { 613 if (!skb) {
599 adapter->alloc_rx_buff_failed++; 614 adapter->alloc_rx_buff_failed++;
@@ -608,7 +623,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
608 skb_reserve(skb, NET_IP_ALIGN); 623 skb_reserve(skb, NET_IP_ALIGN);
609 624
610 bi->skb = skb; 625 bi->skb = skb;
611 bi->dma = pci_map_single(pdev, skb->data, bufsz, 626 bi->dma = pci_map_single(pdev, skb->data,
627 rx_ring->rx_buf_len,
612 PCI_DMA_FROMDEVICE); 628 PCI_DMA_FROMDEVICE);
613 } 629 }
614 /* Refresh the desc even if buffer_addrs didn't change because 630 /* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +748,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
732 pci_unmap_single(pdev, rx_buffer_info->dma, 748 pci_unmap_single(pdev, rx_buffer_info->dma,
733 rx_ring->rx_buf_len, 749 rx_ring->rx_buf_len,
734 PCI_DMA_FROMDEVICE); 750 PCI_DMA_FROMDEVICE);
751 rx_buffer_info->dma = 0;
735 skb_put(skb, len); 752 skb_put(skb, len);
736 } 753 }
737 754
@@ -763,7 +780,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
763 prefetch(next_rxd); 780 prefetch(next_rxd);
764 cleaned_count++; 781 cleaned_count++;
765 782
766 if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE) 783 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
767 rsc_count = ixgbe_get_rsc_count(rx_desc); 784 rsc_count = ixgbe_get_rsc_count(rx_desc);
768 785
769 if (rsc_count) { 786 if (rsc_count) {
@@ -799,7 +816,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
799 goto next_desc; 816 goto next_desc;
800 } 817 }
801 818
802 ixgbe_rx_checksum(adapter, staterr, skb); 819 ixgbe_rx_checksum(adapter, rx_desc, skb);
803 820
804 /* probably a little skewed due to removing CRC */ 821 /* probably a little skewed due to removing CRC */
805 total_rx_bytes += skb->len; 822 total_rx_bytes += skb->len;
@@ -2019,7 +2036,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2019 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2036 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2020 } 2037 }
2021 } else { 2038 } else {
2022 if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) && 2039 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2023 (netdev->mtu <= ETH_DATA_LEN)) 2040 (netdev->mtu <= ETH_DATA_LEN))
2024 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2041 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2025 else 2042 else
@@ -2148,7 +2165,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2148 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2165 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2149 } 2166 }
2150 2167
2151 if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) { 2168 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2152 /* Enable 82599 HW-RSC */ 2169 /* Enable 82599 HW-RSC */
2153 for (i = 0; i < adapter->num_rx_queues; i++) { 2170 for (i = 0; i < adapter->num_rx_queues; i++) {
2154 j = adapter->rx_ring[i].reg_idx; 2171 j = adapter->rx_ring[i].reg_idx;
@@ -2694,16 +2711,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2694 2711
2695 /* 2712 /*
2696 * For hot-pluggable SFP+ devices, a new SFP+ module may have 2713 * For hot-pluggable SFP+ devices, a new SFP+ module may have
2697 * arrived before interrupts were enabled. We need to kick off 2714 * arrived before interrupts were enabled but after probe. Such
2698 * the SFP+ module setup first, then try to bring up link. 2715 * devices wouldn't have their type identified yet. We need to
2716 * kick off the SFP+ module setup first, then try to bring up link.
2699 * If we're not hot-pluggable SFP+, we just need to configure link 2717 * If we're not hot-pluggable SFP+, we just need to configure link
2700 * and bring it up. 2718 * and bring it up.
2701 */ 2719 */
2702 err = hw->phy.ops.identify(hw); 2720 if (hw->phy.type == ixgbe_phy_unknown) {
2703 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 2721 err = hw->phy.ops.identify(hw);
2704 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); 2722 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2705 ixgbe_down(adapter); 2723 /*
2706 return err; 2724 * Take the device down and schedule the sfp tasklet
2725 * which will unregister_netdev and log it.
2726 */
2727 ixgbe_down(adapter);
2728 schedule_work(&adapter->sfp_config_module_task);
2729 return err;
2730 }
2707 } 2731 }
2708 2732
2709 if (ixgbe_is_sfp(hw)) { 2733 if (ixgbe_is_sfp(hw)) {
@@ -2812,9 +2836,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
2812 } 2836 }
2813 if (!rx_buffer_info->page) 2837 if (!rx_buffer_info->page)
2814 continue; 2838 continue;
2815 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, 2839 if (rx_buffer_info->page_dma) {
2816 PCI_DMA_FROMDEVICE); 2840 pci_unmap_page(pdev, rx_buffer_info->page_dma,
2817 rx_buffer_info->page_dma = 0; 2841 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
2842 rx_buffer_info->page_dma = 0;
2843 }
2818 put_page(rx_buffer_info->page); 2844 put_page(rx_buffer_info->page);
2819 rx_buffer_info->page = NULL; 2845 rx_buffer_info->page = NULL;
2820 rx_buffer_info->page_offset = 0; 2846 rx_buffer_info->page_offset = 0;
@@ -3118,7 +3144,11 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3118#endif 3144#endif
3119 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3145 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3120 DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n"); 3146 DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
3121 ixgbe_set_rss_queues(adapter); 3147 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3148 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3149 ixgbe_set_fdir_queues(adapter);
3150 else
3151 ixgbe_set_rss_queues(adapter);
3122 } 3152 }
3123 /* adding FCoE rx rings to the end */ 3153 /* adding FCoE rx rings to the end */
3124 f->mask = adapter->num_rx_queues; 3154 f->mask = adapter->num_rx_queues;
@@ -3376,7 +3406,12 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3376 } 3406 }
3377#endif /* CONFIG_IXGBE_DCB */ 3407#endif /* CONFIG_IXGBE_DCB */
3378 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3408 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3379 ixgbe_cache_ring_rss(adapter); 3409 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3410 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3411 ixgbe_cache_ring_fdir(adapter);
3412 else
3413 ixgbe_cache_ring_rss(adapter);
3414
3380 fcoe_i = f->mask; 3415 fcoe_i = f->mask;
3381 } 3416 }
3382 for (i = 0; i < f->indices; i++, fcoe_i++) 3417 for (i = 0; i < f->indices; i++, fcoe_i++)
@@ -3716,14 +3751,15 @@ static void ixgbe_sfp_task(struct work_struct *work)
3716 if ((hw->phy.type == ixgbe_phy_nl) && 3751 if ((hw->phy.type == ixgbe_phy_nl) &&
3717 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3752 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3718 s32 ret = hw->phy.ops.identify_sfp(hw); 3753 s32 ret = hw->phy.ops.identify_sfp(hw);
3719 if (ret) 3754 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
3720 goto reschedule; 3755 goto reschedule;
3721 ret = hw->phy.ops.reset(hw); 3756 ret = hw->phy.ops.reset(hw);
3722 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3757 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3723 DPRINTK(PROBE, ERR, "failed to initialize because an " 3758 dev_err(&adapter->pdev->dev, "failed to initialize "
3724 "unsupported SFP+ module type was detected.\n" 3759 "because an unsupported SFP+ module type "
3725 "Reload the driver after installing a " 3760 "was detected.\n"
3726 "supported module.\n"); 3761 "Reload the driver after installing a "
3762 "supported module.\n");
3727 unregister_netdev(adapter->netdev); 3763 unregister_netdev(adapter->netdev);
3728 } else { 3764 } else {
3729 DPRINTK(PROBE, INFO, "detected SFP+: %d\n", 3765 DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -3776,16 +3812,17 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3776 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3812 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
3777 } else if (hw->mac.type == ixgbe_mac_82599EB) { 3813 } else if (hw->mac.type == ixgbe_mac_82599EB) {
3778 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 3814 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
3779 adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; 3815 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
3780 adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; 3816 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
3781 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 3817 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
3782 adapter->ring_feature[RING_F_FDIR].indices = 3818 adapter->ring_feature[RING_F_FDIR].indices =
3783 IXGBE_MAX_FDIR_INDICES; 3819 IXGBE_MAX_FDIR_INDICES;
3784 adapter->atr_sample_rate = 20; 3820 adapter->atr_sample_rate = 20;
3785 adapter->fdir_pballoc = 0; 3821 adapter->fdir_pballoc = 0;
3786#ifdef IXGBE_FCOE 3822#ifdef IXGBE_FCOE
3787 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 3823 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
3788 adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; 3824 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
3825 adapter->ring_feature[RING_F_FCOE].indices = 0;
3789#endif /* IXGBE_FCOE */ 3826#endif /* IXGBE_FCOE */
3790 } 3827 }
3791 3828
@@ -4502,7 +4539,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
4502 u32 autoneg; 4539 u32 autoneg;
4503 4540
4504 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; 4541 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
4505 if (hw->mac.ops.get_link_capabilities) 4542 autoneg = hw->phy.autoneg_advertised;
4543 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4506 hw->mac.ops.get_link_capabilities(hw, &autoneg, 4544 hw->mac.ops.get_link_capabilities(hw, &autoneg,
4507 &hw->mac.autoneg); 4545 &hw->mac.autoneg);
4508 if (hw->mac.ops.setup_link_speed) 4546 if (hw->mac.ops.setup_link_speed)
@@ -4524,10 +4562,17 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
4524 u32 err; 4562 u32 err;
4525 4563
4526 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; 4564 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
4565
4566 /* Time for electrical oscillations to settle down */
4567 msleep(100);
4527 err = hw->phy.ops.identify_sfp(hw); 4568 err = hw->phy.ops.identify_sfp(hw);
4569
4528 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4570 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4529 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); 4571 dev_err(&adapter->pdev->dev, "failed to initialize because "
4530 ixgbe_down(adapter); 4572 "an unsupported SFP+ module type was detected.\n"
4573 "Reload the driver after installing a supported "
4574 "module.\n");
4575 unregister_netdev(adapter->netdev);
4531 return; 4576 return;
4532 } 4577 }
4533 hw->mac.ops.setup_sfp(hw); 4578 hw->mac.ops.setup_sfp(hw);
@@ -5095,9 +5140,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
5095 int count = 0; 5140 int count = 0;
5096 unsigned int f; 5141 unsigned int f;
5097 5142
5098 r_idx = skb->queue_mapping;
5099 tx_ring = &adapter->tx_ring[r_idx];
5100
5101 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 5143 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
5102 tx_flags |= vlan_tx_tag_get(skb); 5144 tx_flags |= vlan_tx_tag_get(skb);
5103 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 5145 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
@@ -5107,11 +5149,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
5107 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 5149 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5108 tx_flags |= IXGBE_TX_FLAGS_VLAN; 5150 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5109 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 5151 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5110 tx_flags |= (skb->queue_mapping << 13); 5152 if (skb->priority != TC_PRIO_CONTROL) {
5111 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 5153 tx_flags |= (skb->queue_mapping << 13);
5112 tx_flags |= IXGBE_TX_FLAGS_VLAN; 5154 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5155 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5156 } else {
5157 skb->queue_mapping =
5158 adapter->ring_feature[RING_F_DCB].indices-1;
5159 }
5113 } 5160 }
5114 5161
5162 r_idx = skb->queue_mapping;
5163 tx_ring = &adapter->tx_ring[r_idx];
5164
5115 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5165 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5116 (skb->protocol == htons(ETH_P_FCOE))) 5166 (skb->protocol == htons(ETH_P_FCOE)))
5117 tx_flags |= IXGBE_TX_FLAGS_FCOE; 5167 tx_flags |= IXGBE_TX_FLAGS_FCOE;
@@ -5310,12 +5360,19 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
5310static void ixgbe_netpoll(struct net_device *netdev) 5360static void ixgbe_netpoll(struct net_device *netdev)
5311{ 5361{
5312 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5362 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5363 int i;
5313 5364
5314 disable_irq(adapter->pdev->irq);
5315 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 5365 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
5316 ixgbe_intr(adapter->pdev->irq, netdev); 5366 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5367 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5368 for (i = 0; i < num_q_vectors; i++) {
5369 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
5370 ixgbe_msix_clean_many(0, q_vector);
5371 }
5372 } else {
5373 ixgbe_intr(adapter->pdev->irq, netdev);
5374 }
5317 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 5375 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
5318 enable_irq(adapter->pdev->irq);
5319} 5376}
5320#endif 5377#endif
5321 5378
@@ -5513,8 +5570,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5513 round_jiffies(jiffies + (2 * HZ))); 5570 round_jiffies(jiffies + (2 * HZ)));
5514 err = 0; 5571 err = 0;
5515 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 5572 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5516 dev_err(&adapter->pdev->dev, "failed to load because an " 5573 dev_err(&adapter->pdev->dev, "failed to initialize because "
5517 "unsupported SFP+ module type was detected.\n"); 5574 "an unsupported SFP+ module type was detected.\n"
5575 "Reload the driver after installing a supported "
5576 "module.\n");
5518 goto err_sw_init; 5577 goto err_sw_init;
5519 } else if (err) { 5578 } else if (err) {
5520 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); 5579 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
@@ -5548,29 +5607,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5548#endif 5607#endif
5549 5608
5550#ifdef IXGBE_FCOE 5609#ifdef IXGBE_FCOE
5551 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 5610 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
5552 if (hw->mac.ops.get_device_caps) { 5611 if (hw->mac.ops.get_device_caps) {
5553 hw->mac.ops.get_device_caps(hw, &device_caps); 5612 hw->mac.ops.get_device_caps(hw, &device_caps);
5554 if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) { 5613 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
5555 netdev->features |= NETIF_F_FCOE_CRC; 5614 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5556 netdev->features |= NETIF_F_FSO;
5557 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
5558 DPRINTK(DRV, INFO, "FCoE enabled, "
5559 "disabling Flow Director\n");
5560 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
5561 adapter->flags &=
5562 ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
5563 adapter->atr_sample_rate = 0;
5564 } else {
5565 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5566 }
5567 } 5615 }
5568 } 5616 }
5569#endif /* IXGBE_FCOE */ 5617#endif /* IXGBE_FCOE */
5570 if (pci_using_dac) 5618 if (pci_using_dac)
5571 netdev->features |= NETIF_F_HIGHDMA; 5619 netdev->features |= NETIF_F_HIGHDMA;
5572 5620
5573 if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) 5621 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
5574 netdev->features |= NETIF_F_LRO; 5622 netdev->features |= NETIF_F_LRO;
5575 5623
5576 /* make sure the EEPROM is good */ 5624 /* make sure the EEPROM is good */
@@ -5612,7 +5660,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5612 adapter->wol = 0; 5660 adapter->wol = 0;
5613 break; 5661 break;
5614 } 5662 }
5615 device_init_wakeup(&adapter->pdev->dev, true);
5616 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 5663 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
5617 5664
5618 /* pick up the PCI bus settings for reporting later */ 5665 /* pick up the PCI bus settings for reporting later */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 453e966762f0..9ecad17522c3 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -60,6 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
60 60
61 if (hw->phy.type == ixgbe_phy_unknown) { 61 if (hw->phy.type == ixgbe_phy_unknown) {
62 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 62 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
63 hw->phy.mdio.prtad = phy_addr;
63 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { 64 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
64 ixgbe_get_phy_id(hw); 65 ixgbe_get_phy_id(hw);
65 hw->phy.type = 66 hw->phy.type =
@@ -68,6 +69,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
68 break; 69 break;
69 } 70 }
70 } 71 }
72 /* clear value if nothing found */
73 hw->phy.mdio.prtad = 0;
71 } else { 74 } else {
72 status = 0; 75 status = 0;
73 } 76 }
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fa87309dc087..be90eb4575f6 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -718,6 +718,12 @@
718#define IXGBE_ECC_STATUS_82599 0x110E0 718#define IXGBE_ECC_STATUS_82599 0x110E0
719#define IXGBE_BAR_CTRL_82599 0x110F4 719#define IXGBE_BAR_CTRL_82599 0x110F4
720 720
721/* PCI Express Control */
722#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
723#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
724#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
725#define IXGBE_GCR_CAP_VER2 0x00040000
726
721/* Time Sync Registers */ 727/* Time Sync Registers */
722#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 728#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
723#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ 729#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1521,6 +1527,7 @@
1521 1527
1522/* PCI Bus Info */ 1528/* PCI Bus Info */
1523#define IXGBE_PCI_LINK_STATUS 0xB2 1529#define IXGBE_PCI_LINK_STATUS 0xB2
1530#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
1524#define IXGBE_PCI_LINK_WIDTH 0x3F0 1531#define IXGBE_PCI_LINK_WIDTH 0x3F0
1525#define IXGBE_PCI_LINK_WIDTH_1 0x10 1532#define IXGBE_PCI_LINK_WIDTH_1 0x10
1526#define IXGBE_PCI_LINK_WIDTH_2 0x20 1533#define IXGBE_PCI_LINK_WIDTH_2 0x20
@@ -1531,6 +1538,7 @@
1531#define IXGBE_PCI_LINK_SPEED_5000 0x2 1538#define IXGBE_PCI_LINK_SPEED_5000 0x2
1532#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E 1539#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
1533#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 1540#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
1541#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
1534 1542
1535/* Number of 100 microseconds we wait for PCI Express master disable */ 1543/* Number of 100 microseconds we wait for PCI Express master disable */
1536#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 1544#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index d12106b47bf2..2f286091394d 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -229,6 +229,7 @@ static int __init jazz_sonic_probe(struct platform_device *pdev)
229 lp = netdev_priv(dev); 229 lp = netdev_priv(dev);
230 lp->device = &pdev->dev; 230 lp->device = &pdev->dev;
231 SET_NETDEV_DEV(dev, &pdev->dev); 231 SET_NETDEV_DEV(dev, &pdev->dev);
232 platform_set_drvdata(pdev, dev);
232 233
233 netdev_boot_setup_check(dev); 234 netdev_boot_setup_check(dev);
234 235
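
The jazzsonic one-liner above stores the net_device as the platform device's driver data so that later callbacks (remove, shutdown) can get it back with platform_get_drvdata(). A reduced sketch of the pairing, with placeholder probe/remove names:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int ex_probe(struct platform_device *pdev)
{
        struct net_device *dev = alloc_etherdev(0);

        if (!dev)
                return -ENOMEM;
        platform_set_drvdata(pdev, dev);        /* remember it for remove() */
        /* ... hardware setup and register_netdev(dev) would follow ... */
        return 0;
}

static int ex_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);   /* fetch it back */

        unregister_netdev(dev);
        free_netdev(dev);
        return 0;
}
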
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
new file mode 100644
index 000000000000..9a1dea60c1c4
--- /dev/null
+++ b/drivers/net/ks8851.c
@@ -0,0 +1,1322 @@
 1/* drivers/net/ks8851.c
2 *
3 * Copyright 2009 Simtec Electronics
4 * http://www.simtec.co.uk/
5 * Ben Dooks <ben@simtec.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#define DEBUG
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/ethtool.h>
19#include <linux/cache.h>
20#include <linux/crc32.h>
21#include <linux/mii.h>
22
23#include <linux/spi/spi.h>
24
25#include "ks8851.h"
26
27/**
28 * struct ks8851_rxctrl - KS8851 driver rx control
29 * @mchash: Multicast hash-table data.
30 * @rxcr1: KS_RXCR1 register setting
31 * @rxcr2: KS_RXCR2 register setting
32 *
 33 * Representation of the settings needed to control the receive filtering
34 * such as the multicast hash-filter and the receive register settings. This
35 * is used to make the job of working out if the receive settings change and
36 * then issuing the new settings to the worker that will send the necessary
37 * commands.
38 */
39struct ks8851_rxctrl {
40 u16 mchash[4];
41 u16 rxcr1;
42 u16 rxcr2;
43};
44
45/**
46 * union ks8851_tx_hdr - tx header data
47 * @txb: The header as bytes
48 * @txw: The header as 16bit, little-endian words
49 *
50 * A dual representation of the tx header data to allow
51 * access to individual bytes, and to allow 16bit accesses
52 * with 16bit alignment.
53 */
54union ks8851_tx_hdr {
55 u8 txb[6];
56 __le16 txw[3];
57};
58
59/**
60 * struct ks8851_net - KS8851 driver private data
61 * @netdev: The network device we're bound to
62 * @spidev: The spi device we're bound to.
63 * @lock: Lock to ensure that the device is not accessed when busy.
64 * @statelock: Lock on this structure for tx list.
65 * @mii: The MII state information for the mii calls.
66 * @rxctrl: RX settings for @rxctrl_work.
67 * @tx_work: Work queue for tx packets
68 * @irq_work: Work queue for servicing interrupts
69 * @rxctrl_work: Work queue for updating RX mode and multicast lists
70 * @txq: Queue of packets for transmission.
71 * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
72 * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2.
73 * @txh: Space for generating packet TX header in DMA-able data
74 * @rxd: Space for receiving SPI data, in DMA-able space.
75 * @txd: Space for transmitting SPI data, in DMA-able space.
76 * @msg_enable: The message flags controlling driver output (see ethtool).
77 * @fid: Incrementing frame id tag.
78 * @rc_ier: Cached copy of KS_IER.
79 * @rc_rxqcr: Cached copy of KS_RXQCR.
80 *
81 * The @lock ensures that the chip is protected when certain operations are
82 * in progress. When the read or write packet transfer is in progress, most
83 * of the chip registers are not accessible until the transfer is finished and
84 * the DMA has been de-asserted.
85 *
86 * The @statelock is used to protect information in the structure which may
87 * need to be accessed via several sources, such as the network driver layer
88 * or one of the work queues.
89 *
90 * We align the buffers we may use for rx/tx to ensure that if the SPI driver
91 * wants to DMA map them, it will not have any problems with data the driver
92 * modifies.
93 */
94struct ks8851_net {
95 struct net_device *netdev;
96 struct spi_device *spidev;
97 struct mutex lock;
98 spinlock_t statelock;
99
100 union ks8851_tx_hdr txh ____cacheline_aligned;
101 u8 rxd[8];
102 u8 txd[8];
103
104 u32 msg_enable ____cacheline_aligned;
105 u16 tx_space;
106 u8 fid;
107
108 u16 rc_ier;
109 u16 rc_rxqcr;
110
111 struct mii_if_info mii;
112 struct ks8851_rxctrl rxctrl;
113
114 struct work_struct tx_work;
115 struct work_struct irq_work;
116 struct work_struct rxctrl_work;
117
118 struct sk_buff_head txq;
119
120 struct spi_message spi_msg1;
121 struct spi_message spi_msg2;
122 struct spi_transfer spi_xfer1;
123 struct spi_transfer spi_xfer2[2];
124};
125
126static int msg_enable;
127
128#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
129#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
130#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
131#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
132
133/* shift for byte-enable data */
134#define BYTE_EN(_x) ((_x) << 2)
135
136/* turn register number and byte-enable mask into data for start of packet */
137#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6)
138
139/* SPI register read/write calls.
140 *
141 * All these calls issue SPI transactions to access the chip's registers. They
142 * all require that the necessary lock is held to prevent accesses when the
143 * chip is busy transferring packet data (RX/TX FIFO accesses).
144 */
145
146/**
147 * ks8851_wrreg16 - write 16bit register value to chip
148 * @ks: The chip state
149 * @reg: The register address
150 * @val: The value to write
151 *
152 * Issue a write to put the value @val into the register specified in @reg.
153 */
154static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
155{
156 struct spi_transfer *xfer = &ks->spi_xfer1;
157 struct spi_message *msg = &ks->spi_msg1;
158 __le16 txb[2];
159 int ret;
160
161 txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
162 txb[1] = cpu_to_le16(val);
163
164 xfer->tx_buf = txb;
165 xfer->rx_buf = NULL;
166 xfer->len = 4;
167
168 ret = spi_sync(ks->spidev, msg);
169 if (ret < 0)
170 ks_err(ks, "spi_sync() failed\n");
171}
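The 16-bit command word built by MK_OP() is stored little-endian, so the low byte, which carries the opcode and byte enables, is clocked out first and the low register address bits follow in the second byte. A worked sketch for a hypothetical write to KS_MARL (0x10), using the even-word byte enables of 0x3 chosen above:

/*
 * Sketch: ks8851_wrreg16(ks, KS_MARL, val), KS_MARL = 0x10, byteen = 0x3
 *
 *   MK_OP(0x3, 0x10) = BYTE_EN(0x3) | (0x10 << 10) | (0x10 >> 6)
 *                    = 0x000c | 0x4000 | 0x0000 = 0x400c
 *   0x400c | KS_SPIOP_WR = 0x404c
 *
 * cpu_to_le16(0x404c) puts 0x4c on the bus first, then 0x40:
 *   byte 0: 0b01 0011 00 - write opcode, byte enables 0011, reg[7:6]
 *   byte 1: 0b010000 00  - reg[5:0] = 0x10, two don't-care bits
 */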
172
173/**
174 * ks8851_rx_1msg - select whether to use one or two messages for spi read
175 * @ks: The device structure
176 *
177 * Return whether to generate a single message with a tx and rx buffer
178 * supplied to spi_sync(), or alternatively send the tx and rx buffers
179 * as separate messages.
180 *
181 * Depending on the hardware in use, a single message may be more efficient
182 * on interrupts or work done by the driver.
183 *
184 * This currently always returns true until we add some per-device data passed
185 * from the platform code to specify which mode is better.
186 */
187static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
188{
189 return true;
190}
191
192/**
193 * ks8851_rdreg - issue read register command and return the data
194 * @ks: The device state
195 * @op: The register address and byte enables in message format.
196 * @rxb: The RX buffer to return the result into
197 * @rxl: The length of data expected.
198 *
199 * This is the low level read call that issues the necessary spi message(s)
200 * to read data from the register specified in @op.
201 */
202static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
203 u8 *rxb, unsigned rxl)
204{
205 struct spi_transfer *xfer;
206 struct spi_message *msg;
207 __le16 *txb = (__le16 *)ks->txd;
208 u8 *trx = ks->rxd;
209 int ret;
210
211 txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
212
213 if (ks8851_rx_1msg(ks)) {
214 msg = &ks->spi_msg1;
215 xfer = &ks->spi_xfer1;
216
217 xfer->tx_buf = txb;
218 xfer->rx_buf = trx;
219 xfer->len = rxl + 2;
220 } else {
221 msg = &ks->spi_msg2;
222 xfer = ks->spi_xfer2;
223
224 xfer->tx_buf = txb;
225 xfer->rx_buf = NULL;
226 xfer->len = 2;
227
228 xfer++;
229 xfer->tx_buf = NULL;
230 xfer->rx_buf = trx;
231 xfer->len = rxl;
232 }
233
234 ret = spi_sync(ks->spidev, msg);
235 if (ret < 0)
236 ks_err(ks, "read: spi_sync() failed\n");
237 else if (ks8851_rx_1msg(ks))
238 memcpy(rxb, trx + 2, rxl);
239 else
240 memcpy(rxb, trx, rxl);
241}
242
243/**
244 * ks8851_rdreg8 - read 8 bit register from device
245 * @ks: The chip information
246 * @reg: The register address
247 *
248 * Read a 8bit register from the chip, returning the result
249*/
250static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
251{
252 u8 rxb[1];
253
254 ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1);
255 return rxb[0];
256}
257
258/**
259 * ks8851_rdreg16 - read 16 bit register from device
260 * @ks: The chip information
261 * @reg: The register address
262 *
263 * Read a 16bit register from the chip, returning the result
264*/
265static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg)
266{
267 __le16 rx = 0;
268
269 ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
270 return le16_to_cpu(rx);
271}
272
273/**
274 * ks8851_rdreg32 - read 32 bit register from device
275 * @ks: The chip information
276 * @reg: The register address
277 *
278 * Read a 32bit register from the chip.
279 *
280 * Note, this read requires the address be aligned to 4 bytes.
281*/
282static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg)
283{
284 __le32 rx = 0;
285
286 WARN_ON(reg & 3);
287
288 ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4);
289 return le32_to_cpu(rx);
290}
291
292/**
293 * ks8851_soft_reset - issue one of the soft reset to the device
294 * @ks: The device state.
295 * @op: The bit(s) to set in the GRR
296 *
297 * Issue the relevant soft-reset command to the device's GRR register
298 * specified by @op.
299 *
300 * Note, the delays are in there as a caution to ensure that the reset
301 * has time to take effect and then complete. Since the datasheet does
302 * not currently specify the exact sequence, we have chosen something
303 * that seems to work with our device.
304 */
305static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
306{
307 ks8851_wrreg16(ks, KS_GRR, op);
308 mdelay(1); /* wait a short time to effect reset */
309 ks8851_wrreg16(ks, KS_GRR, 0);
310 mdelay(1); /* wait for condition to clear */
311}
312
313/**
314 * ks8851_write_mac_addr - write mac address to device registers
315 * @dev: The network device
316 *
317 * Update the KS8851 MAC address registers from the address in @dev.
318 *
319 * This call assumes that the chip is not running, so there is no need to
320 * shutdown the RXQ process whilst setting this.
321*/
322static int ks8851_write_mac_addr(struct net_device *dev)
323{
324 struct ks8851_net *ks = netdev_priv(dev);
325 u16 *mcp = (u16 *)dev->dev_addr;
326
327 mutex_lock(&ks->lock);
328
329 ks8851_wrreg16(ks, KS_MARL, mcp[0]);
330 ks8851_wrreg16(ks, KS_MARM, mcp[1]);
331 ks8851_wrreg16(ks, KS_MARH, mcp[2]);
332
333 mutex_unlock(&ks->lock);
334
335 return 0;
336}
337
338/**
339 * ks8851_init_mac - initialise the mac address
340 * @ks: The device structure
341 *
342 * Get or create the initial mac address for the device and then set that
343 * into the station address register. Currently we assume that the device
344 * does not have a valid mac address in it, and so we use random_ether_addr()
345 * to create a new one.
346 *
347 * In future, the driver should check to see if the device has an EEPROM
348 * attached and whether that has a valid ethernet address in it.
349 */
350static void ks8851_init_mac(struct ks8851_net *ks)
351{
352 struct net_device *dev = ks->netdev;
353
354 random_ether_addr(dev->dev_addr);
355 ks8851_write_mac_addr(dev);
356}
357
358/**
359 * ks8851_irq - device interrupt handler
360 * @irq: Interrupt number passed from the IRQ handler.
361 * @pw: The private word passed to request_irq(), our struct ks8851_net.
362 *
363 * Disable the interrupt from happening again until we've processed the
364 * current status by scheduling ks8851_irq_work().
365 */
366static irqreturn_t ks8851_irq(int irq, void *pw)
367{
368 struct ks8851_net *ks = pw;
369
370 disable_irq_nosync(irq);
371 schedule_work(&ks->irq_work);
372 return IRQ_HANDLED;
373}
374
375/**
376 * ks8851_rdfifo - read data from the receive fifo
377 * @ks: The device state.
378 * @buff: The buffer address
379 * @len: The length of the data to read
380 *
381 * Issue an RXQ FIFO read command and read the @len amount of data from
382 * the FIFO into the buffer specified by @buff.
383 */
384static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
385{
386 struct spi_transfer *xfer = ks->spi_xfer2;
387 struct spi_message *msg = &ks->spi_msg2;
388 u8 txb[1];
389 int ret;
390
391 if (netif_msg_rx_status(ks))
392 ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff);
393
394 /* set the operation we're issuing */
395 txb[0] = KS_SPIOP_RXFIFO;
396
397 xfer->tx_buf = txb;
398 xfer->rx_buf = NULL;
399 xfer->len = 1;
400
401 xfer++;
402 xfer->rx_buf = buff;
403 xfer->tx_buf = NULL;
404 xfer->len = len;
405
406 ret = spi_sync(ks->spidev, msg);
407 if (ret < 0)
408 ks_err(ks, "%s: spi_sync() failed\n", __func__);
409}
410
411/**
412 * ks8851_dbg_dumpkkt - dump initial packet contents to debug
413 * @ks: The device state
414 * @rxpkt: The data for the received packet
415 *
416 * Dump the initial data from the packet to dev_dbg().
417*/
418static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
419{
420 ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
421 rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
422 rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
423 rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
424}
425
426/**
427 * ks8851_rx_pkts - receive packets from the device
428 * @ks: The device information.
429 *
430 * This is called from the IRQ work queue when the system detects that there
431 * are packets in the receive queue. Find out how many packets there are and
432 * read them from the FIFO.
433 */
434static void ks8851_rx_pkts(struct ks8851_net *ks)
435{
436 struct sk_buff *skb;
437 unsigned rxfc;
438 unsigned rxlen;
439 unsigned rxstat;
440 u32 rxh;
441 u8 *rxpkt;
442
443 rxfc = ks8851_rdreg8(ks, KS_RXFC);
444
445 if (netif_msg_rx_status(ks))
446 ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);
447
448 /* Currently we're issuing a read per packet, but we could possibly
449 * improve the code by issuing a single read, getting the receive
450 * header, allocating the packet and then reading the packet data
451 * out in one go.
452 *
453 * This form of operation would require us to hold the SPI bus'
454 * chipselect low during the entire transaction to avoid any
455 * reset of the data stream coming from the chip.
456 */
457
458 for (; rxfc != 0; rxfc--) {
459 rxh = ks8851_rdreg32(ks, KS_RXFHSR);
460 rxstat = rxh & 0xffff;
461 rxlen = rxh >> 16;
462
463 if (netif_msg_rx_status(ks))
464 ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n",
465 rxstat, rxlen);
466
467 /* the length of the packet includes the 32bit CRC */
468
469 /* set dma read address */
470 ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
471
472 /* start the packet dma process, and set auto-dequeue rx */
473 ks8851_wrreg16(ks, KS_RXQCR,
474 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
475
476 if (rxlen > 0) {
477 skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
478 if (!skb) {
479 /* todo - dump frame and move on */
480 }
481
482 /* two bytes to ensure the ip header is aligned, four bytes
483 * for the status header and four bytes of garbage */
484 skb_reserve(skb, 2 + 4 + 4);
485
486 rxpkt = skb_put(skb, rxlen - 4) - 8;
487
488 /* align the packet length to 4 bytes, and add 8 bytes
489 * as we're also reading the garbage and rx status header */
490 ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
491
492 if (netif_msg_pktdata(ks))
493 ks8851_dbg_dumpkkt(ks, rxpkt);
494
495 skb->protocol = eth_type_trans(skb, ks->netdev);
496 netif_rx(skb);
497
498 ks->netdev->stats.rx_packets++;
499 ks->netdev->stats.rx_bytes += rxlen - 4;
500 }
501
502 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
503 }
504}
505
506/**
507 * ks8851_irq_work - work queue handler for dealing with interrupt requests
508 * @work: The work structure that was scheduled by schedule_work()
509 *
510 * This is the handler invoked when the ks8851_irq() is called to find out
511 * what happened, since we cannot sleep in the interrupt handler itself
512 * whilst waiting for the chip's lock or for SPI transfers to complete.
513 *
514 * Read the interrupt status, work out what needs to be done and then clear
515 * any of the interrupts that are not needed.
516 */
517static void ks8851_irq_work(struct work_struct *work)
518{
519 struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work);
520 unsigned status;
521 unsigned handled = 0;
522
523 mutex_lock(&ks->lock);
524
525 status = ks8851_rdreg16(ks, KS_ISR);
526
527 if (netif_msg_intr(ks))
528 dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n",
529 __func__, status);
530
531 if (status & IRQ_LCI) {
532 /* should do something about checking link status */
533 handled |= IRQ_LCI;
534 }
535
536 if (status & IRQ_LDI) {
537 u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
538 pmecr &= ~PMECR_WKEVT_MASK;
539 ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
540
541 handled |= IRQ_LDI;
542 }
543
544 if (status & IRQ_RXPSI)
545 handled |= IRQ_RXPSI;
546
547 if (status & IRQ_TXI) {
548 handled |= IRQ_TXI;
549
550 /* no lock here, tx queue should have been stopped */
551
552 /* update our idea of how much tx space is available to the
553 * system */
554 ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
555
556 if (netif_msg_intr(ks))
557 ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space);
558 }
559
560 if (status & IRQ_RXI)
561 handled |= IRQ_RXI;
562
563 if (status & IRQ_SPIBEI) {
564 dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__);
565 handled |= IRQ_SPIBEI;
566 }
567
568 ks8851_wrreg16(ks, KS_ISR, handled);
569
570 if (status & IRQ_RXI) {
571 /* the datasheet says to disable the rx interrupt during
572 * packet read-out, however the whole interrupt is already
573 * masked at the host so there is no need to also mask just
574 * the RX interrupt on the device. */
575
576 ks8851_rx_pkts(ks);
577 }
578
579 /* if something stopped the rx process, probably due to wanting
580 * to change the rx settings, then do something about restarting
581 * it. */
582 if (status & IRQ_RXPSI) {
583 struct ks8851_rxctrl *rxc = &ks->rxctrl;
584
585 /* update the multicast hash table */
586 ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]);
587 ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]);
588 ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]);
589 ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]);
590
591 ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2);
592 ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
593 }
594
595 mutex_unlock(&ks->lock);
596
597 if (status & IRQ_TXI)
598 netif_wake_queue(ks->netdev);
599
600 enable_irq(ks->netdev->irq);
601}
602
603/**
604 * calc_txlen - calculate size of message to send packet
605 * @len: Length of data
606 *
607 * Returns the size of the TXFIFO message needed to send
608 * this packet.
609 */
610static inline unsigned calc_txlen(unsigned len)
611{
612 return ALIGN(len + 4, 4);
613}
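calc_txlen() appears to account for the 4 byte frame header that precedes the packet data in the TX FIFO and rounds the total up to a 32-bit boundary; a couple of hypothetical values as a sketch:

/*
 * Sketch: calc_txlen(60) = ALIGN(64, 4) = 64
 *         calc_txlen(61) = ALIGN(65, 4) = 68
 */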
614
615/**
616 * ks8851_wrpkt - write packet to TX FIFO
617 * @ks: The device state.
618 * @txp: The sk_buff to transmit.
619 * @irq: IRQ on completion of the packet.
620 *
621 * Send the @txp to the chip. This means creating the relevant packet header
622 * specifying the length of the packet and the other information the chip
623 * needs, such as IRQ on completion. Send the header and the packet data to
624 * the device.
625 */
626static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
627{
628 struct spi_transfer *xfer = ks->spi_xfer2;
629 struct spi_message *msg = &ks->spi_msg2;
630 unsigned fid = 0;
631 int ret;
632
633 if (netif_msg_tx_queued(ks))
634 dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n",
635 __func__, txp, txp->len, txp->data, irq);
636
637 fid = ks->fid++;
638 fid &= TXFR_TXFID_MASK;
639
640 if (irq)
641 fid |= TXFR_TXIC; /* irq on completion */
642
643 /* start header at txb[1] to align txw entries */
644 ks->txh.txb[1] = KS_SPIOP_TXFIFO;
645 ks->txh.txw[1] = cpu_to_le16(fid);
646 ks->txh.txw[2] = cpu_to_le16(txp->len);
647
648 xfer->tx_buf = &ks->txh.txb[1];
649 xfer->rx_buf = NULL;
650 xfer->len = 5;
651
652 xfer++;
653 xfer->tx_buf = txp->data;
654 xfer->rx_buf = NULL;
655 xfer->len = ALIGN(txp->len, 4);
656
657 ret = spi_sync(ks->spidev, msg);
658 if (ret < 0)
659 ks_err(ks, "%s: spi_sync() failed\n", __func__);
660}
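The header is built from txb[1] onwards so that txw[1] and txw[2] stay 16-bit aligned; on the wire the five header bytes look roughly like this (hypothetical values, not taken from a capture):

/*
 * Sketch: fid = 1 with TXFR_TXIC set, txp->len = 60
 *
 *   txb[1]  0xc0       KS_SPIOP_TXFIFO command byte
 *   txw[1]  0x01 0x80  control word 0x8001 (fid 1 | TXFR_TXIC), little-endian
 *   txw[2]  0x3c 0x00  frame length 60, little-endian
 */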
661
662/**
663 * ks8851_done_tx - update and then free skbuff after transmitting
664 * @ks: The device state
665 * @txb: The buffer transmitted
666 */
667static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb)
668{
669 struct net_device *dev = ks->netdev;
670
671 dev->stats.tx_bytes += txb->len;
672 dev->stats.tx_packets++;
673
674 dev_kfree_skb(txb);
675}
676
677/**
678 * ks8851_tx_work - process tx packet(s)
679 * @work: The work structure that was scheduled.
680 *
681 * This is called when a number of packets have been scheduled for
682 * transmission and need to be sent to the device.
683 */
684static void ks8851_tx_work(struct work_struct *work)
685{
686 struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
687 struct sk_buff *txb;
688 bool last = false;
689
690 mutex_lock(&ks->lock);
691
692 while (!last) {
693 txb = skb_dequeue(&ks->txq);
694 last = skb_queue_empty(&ks->txq);
695
696 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
697 ks8851_wrpkt(ks, txb, last);
698 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
699 ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
700
701 ks8851_done_tx(ks, txb);
702 }
703
704 mutex_unlock(&ks->lock);
705}
706
707/**
708 * ks8851_set_powermode - set power mode of the device
709 * @ks: The device state
710 * @pwrmode: The power mode value to write to KS_PMECR.
711 *
712 * Change the power mode of the chip.
713 */
714static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
715{
716 unsigned pmecr;
717
718 if (netif_msg_hw(ks))
719 ks_dbg(ks, "setting power mode %d\n", pwrmode);
720
721 pmecr = ks8851_rdreg16(ks, KS_PMECR);
722 pmecr &= ~PMECR_PM_MASK;
723 pmecr |= pwrmode;
724
725 ks8851_wrreg16(ks, KS_PMECR, pmecr);
726}
727
728/**
729 * ks8851_net_open - open network device
730 * @dev: The network device being opened.
731 *
732 * Called when the network device is marked active, such as a user executing
733 * 'ifconfig up' on the device.
734 */
735static int ks8851_net_open(struct net_device *dev)
736{
737 struct ks8851_net *ks = netdev_priv(dev);
738
739 /* lock the card, even if we may not actually be doing anything
740 * else at the moment */
741 mutex_lock(&ks->lock);
742
743 if (netif_msg_ifup(ks))
744 ks_dbg(ks, "opening %s\n", dev->name);
745
746 /* bring chip out of any power saving mode it was in */
747 ks8851_set_powermode(ks, PMECR_PM_NORMAL);
748
749 /* issue a soft reset to the RX/TX QMU to put it into a known
750 * state. */
751 ks8851_soft_reset(ks, GRR_QMU);
752
753 /* setup transmission parameters */
754
755 ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE | /* enable transmit process */
756 TXCR_TXPE | /* pad to min length */
757 TXCR_TXCRC | /* add CRC */
758 TXCR_TXFCE)); /* enable flow control */
759
760 /* auto-increment tx data, reset tx pointer */
761 ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
762
763 /* setup receiver control */
764
765 ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /* from mac filter */
766 RXCR1_RXFCE | /* enable flow control */
767 RXCR1_RXBE | /* broadcast enable */
768 RXCR1_RXUE | /* unicast enable */
769 RXCR1_RXE)); /* enable rx block */
770
771 /* transfer entire frames out in one go */
772 ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME);
773
774 /* set receive counter timeouts */
775 ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */
776 ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */
777 ks8851_wrreg16(ks, KS_RXFCTR, 10); /* 10 frames to IRQ */
778
779 ks->rc_rxqcr = (RXQCR_RXFCTE | /* IRQ on frame count exceeded */
780 RXQCR_RXDBCTE | /* IRQ on byte count exceeded */
781 RXQCR_RXDTTE); /* IRQ on time exceeded */
782
783 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
784
785 /* clear then enable interrupts */
786
787#define STD_IRQ (IRQ_LCI | /* Link Change */ \
788 IRQ_TXI | /* TX done */ \
789 IRQ_RXI | /* RX done */ \
790 IRQ_SPIBEI | /* SPI bus error */ \
791 IRQ_TXPSI | /* TX process stop */ \
792 IRQ_RXPSI) /* RX process stop */
793
794 ks->rc_ier = STD_IRQ;
795 ks8851_wrreg16(ks, KS_ISR, STD_IRQ);
796 ks8851_wrreg16(ks, KS_IER, STD_IRQ);
797
798 netif_start_queue(ks->netdev);
799
800 if (netif_msg_ifup(ks))
801 ks_dbg(ks, "network device %s up\n", dev->name);
802
803 mutex_unlock(&ks->lock);
804 return 0;
805}
806
807/**
808 * ks8851_net_stop - close network device
809 * @dev: The device being closed.
810 *
811 * Called to close down a network device which has been active. Cancel any
812 * work, shutdown the RX and TX process and then place the chip into a low
813 * power state whilst it is not being used.
814 */
815static int ks8851_net_stop(struct net_device *dev)
816{
817 struct ks8851_net *ks = netdev_priv(dev);
818
819 if (netif_msg_ifdown(ks))
820 ks_info(ks, "%s: shutting down\n", dev->name);
821
822 netif_stop_queue(dev);
823
824 mutex_lock(&ks->lock);
825
826 /* stop any outstanding work */
827 flush_work(&ks->irq_work);
828 flush_work(&ks->tx_work);
829 flush_work(&ks->rxctrl_work);
830
831 /* turn off the IRQs and ack any outstanding */
832 ks8851_wrreg16(ks, KS_IER, 0x0000);
833 ks8851_wrreg16(ks, KS_ISR, 0xffff);
834
835 /* shutdown RX process */
836 ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
837
838 /* shutdown TX process */
839 ks8851_wrreg16(ks, KS_TXCR, 0x0000);
840
841 /* set powermode to soft power down to save power */
842 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
843
844 /* ensure any queued tx buffers are dumped */
845 while (!skb_queue_empty(&ks->txq)) {
846 struct sk_buff *txb = skb_dequeue(&ks->txq);
847
848 if (netif_msg_ifdown(ks))
849 ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb);
850
851 dev_kfree_skb(txb);
852 }
853
854 mutex_unlock(&ks->lock);
855 return 0;
856}
857
858/**
859 * ks8851_start_xmit - transmit packet
860 * @skb: The buffer to transmit
861 * @dev: The device used to transmit the packet.
862 *
863 * Called by the network layer to transmit the @skb. Queue the packet for
864 * the device and schedule the necessary work to transmit the packet when
865 * it is free.
866 *
867 * We do this firstly to avoid sleeping with the network device locked,
868 * and secondly so that several packets can be batched up for transmission,
869 * which helps avoid generating too many transmit-done interrupts.
870 */
871static int ks8851_start_xmit(struct sk_buff *skb, struct net_device *dev)
872{
873 struct ks8851_net *ks = netdev_priv(dev);
874 unsigned needed = calc_txlen(skb->len);
875 int ret = NETDEV_TX_OK;
876
877 if (netif_msg_tx_queued(ks))
878 ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
879 skb, skb->len, skb->data);
880
881 spin_lock(&ks->statelock);
882
883 if (needed > ks->tx_space) {
884 netif_stop_queue(dev);
885 ret = NETDEV_TX_BUSY;
886 } else {
887 ks->tx_space -= needed;
888 skb_queue_tail(&ks->txq, skb);
889 }
890
891 spin_unlock(&ks->statelock);
892 schedule_work(&ks->tx_work);
893
894 return ret;
895}
896
897/**
898 * ks8851_rxctrl_work - work handler to change rx mode
899 * @work: The work structure this belongs to.
900 *
901 * Lock the device and issue the necessary changes to the receive mode from
902 * the network device layer. Doing it from a work queue avoids having
903 * to sleep whilst holding the network device lock.
904 *
905 * Since the recommendation from Micrel is that the RXQ is shutdown whilst the
906 * receive parameters are programmed, we issue a write to disable the RXQ and
907 * then wait for the interrupt handler to be triggered once the RXQ shutdown is
908 * complete. The interrupt handler then writes the new values into the chip.
909 */
910static void ks8851_rxctrl_work(struct work_struct *work)
911{
912 struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
913
914 mutex_lock(&ks->lock);
915
916 /* need to shutdown RXQ before modifying filter parameters */
917 ks8851_wrreg16(ks, KS_RXCR1, 0x00);
918
919 mutex_unlock(&ks->lock);
920}
921
922static void ks8851_set_rx_mode(struct net_device *dev)
923{
924 struct ks8851_net *ks = netdev_priv(dev);
925 struct ks8851_rxctrl rxctrl;
926
927 memset(&rxctrl, 0, sizeof(rxctrl));
928
929 if (dev->flags & IFF_PROMISC) {
930 /* interface to receive everything */
931
932 rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF;
933 } else if (dev->flags & IFF_ALLMULTI) {
934 /* accept all multicast packets */
935
936 rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
937 RXCR1_RXPAFMA | RXCR1_RXMAFMA);
938 } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
939 struct dev_mc_list *mcptr = dev->mc_list;
940 u32 crc;
941 int i;
942
943 /* accept some multicast */
944
945 for (i = dev->mc_count; i > 0; i--) {
946 crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
947 crc >>= (32 - 6); /* get top six bits */
948
949 rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
950 mcptr = mcptr->next;
951 }
952
953 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
954 } else {
955 /* just accept broadcast / unicast */
956 rxctrl.rxcr1 = RXCR1_RXPAFMA;
957 }
958
959 rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */
960 RXCR1_RXBE | /* broadcast enable */
961 RXCR1_RXE | /* RX process enable */
962 RXCR1_RXFCE); /* enable flow control */
963
964 rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME;
965
966 /* schedule work to do the actual set of the data if needed */
967
968 spin_lock(&ks->statelock);
969
970 if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
971 memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
972 schedule_work(&ks->rxctrl_work);
973 }
974
975 spin_unlock(&ks->statelock);
976}
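The hash filter above packs a 6-bit index into the four 16-bit KS_MAHTR registers that ks8851_irq_work() later writes out: bits [5:4] of the index select the register and bits [3:0] the bit within it. A worked sketch with a made-up CRC value:

/*
 * Sketch: assume ether_crc() returned 0xb61b7740 for some address
 *
 *   crc >> 26          = 0x2d       (top six bits)
 *   mchash[0x2d >> 4]  = mchash[2]  -> written to KS_MAHTR2
 *   1 << (0x2d & 0xf)  = 1 << 13    (bit within that register)
 */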
977
978static int ks8851_set_mac_address(struct net_device *dev, void *addr)
979{
980 struct sockaddr *sa = addr;
981
982 if (netif_running(dev))
983 return -EBUSY;
984
985 if (!is_valid_ether_addr(sa->sa_data))
986 return -EADDRNOTAVAIL;
987
988 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
989 return ks8851_write_mac_addr(dev);
990}
991
992static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
993{
994 struct ks8851_net *ks = netdev_priv(dev);
995
996 if (!netif_running(dev))
997 return -EINVAL;
998
999 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1000}
1001
1002static const struct net_device_ops ks8851_netdev_ops = {
1003 .ndo_open = ks8851_net_open,
1004 .ndo_stop = ks8851_net_stop,
1005 .ndo_do_ioctl = ks8851_net_ioctl,
1006 .ndo_start_xmit = ks8851_start_xmit,
1007 .ndo_set_mac_address = ks8851_set_mac_address,
1008 .ndo_set_rx_mode = ks8851_set_rx_mode,
1009 .ndo_change_mtu = eth_change_mtu,
1010 .ndo_validate_addr = eth_validate_addr,
1011};
1012
1013/* ethtool support */
1014
1015static void ks8851_get_drvinfo(struct net_device *dev,
1016 struct ethtool_drvinfo *di)
1017{
1018 strlcpy(di->driver, "KS8851", sizeof(di->driver));
1019 strlcpy(di->version, "1.00", sizeof(di->version));
1020 strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
1021}
1022
1023static u32 ks8851_get_msglevel(struct net_device *dev)
1024{
1025 struct ks8851_net *ks = netdev_priv(dev);
1026 return ks->msg_enable;
1027}
1028
1029static void ks8851_set_msglevel(struct net_device *dev, u32 to)
1030{
1031 struct ks8851_net *ks = netdev_priv(dev);
1032 ks->msg_enable = to;
1033}
1034
1035static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1036{
1037 struct ks8851_net *ks = netdev_priv(dev);
1038 return mii_ethtool_gset(&ks->mii, cmd);
1039}
1040
1041static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1042{
1043 struct ks8851_net *ks = netdev_priv(dev);
1044 return mii_ethtool_sset(&ks->mii, cmd);
1045}
1046
1047static u32 ks8851_get_link(struct net_device *dev)
1048{
1049 struct ks8851_net *ks = netdev_priv(dev);
1050 return mii_link_ok(&ks->mii);
1051}
1052
1053static int ks8851_nway_reset(struct net_device *dev)
1054{
1055 struct ks8851_net *ks = netdev_priv(dev);
1056 return mii_nway_restart(&ks->mii);
1057}
1058
1059static const struct ethtool_ops ks8851_ethtool_ops = {
1060 .get_drvinfo = ks8851_get_drvinfo,
1061 .get_msglevel = ks8851_get_msglevel,
1062 .set_msglevel = ks8851_set_msglevel,
1063 .get_settings = ks8851_get_settings,
1064 .set_settings = ks8851_set_settings,
1065 .get_link = ks8851_get_link,
1066 .nway_reset = ks8851_nway_reset,
1067};
1068
1069/* MII interface controls */
1070
1071/**
1072 * ks8851_phy_reg - convert MII register into a KS8851 register
1073 * @reg: MII register number.
1074 *
1075 * Return the KS8851 register number for the corresponding MII PHY register
1076 * if possible. Return zero if the MII register has no direct mapping to the
1077 * KS8851 register set.
1078 */
1079static int ks8851_phy_reg(int reg)
1080{
1081 switch (reg) {
1082 case MII_BMCR:
1083 return KS_P1MBCR;
1084 case MII_BMSR:
1085 return KS_P1MBSR;
1086 case MII_PHYSID1:
1087 return KS_PHY1ILR;
1088 case MII_PHYSID2:
1089 return KS_PHY1IHR;
1090 case MII_ADVERTISE:
1091 return KS_P1ANAR;
1092 case MII_LPA:
1093 return KS_P1ANLPR;
1094 }
1095
1096 return 0x0;
1097}
1098
1099/**
1100 * ks8851_phy_read - MII interface PHY register read.
1101 * @dev: The network device the PHY is on.
1102 * @phy_addr: Address of PHY (ignored as we only have one)
1103 * @reg: The register to read.
1104 *
1105 * This call reads data from the PHY register specified in @reg. Since the
1106 * device does not support all the MII registers, the non-existent values
1107 * are always returned as zero.
1108 *
1109 * We return zero for unsupported registers as the MII code does not check
1110 * the value returned for any error status, and simply returns it to the
1111 * caller. The mii-tool that the driver was tested with takes any -ve error
1112 * as real PHY capabilities, thus displaying incorrect data to the user.
1113 */
1114static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
1115{
1116 struct ks8851_net *ks = netdev_priv(dev);
1117 int ksreg;
1118 int result;
1119
1120 ksreg = ks8851_phy_reg(reg);
1121 if (!ksreg)
1122 return 0x0; /* no error return allowed, so use zero */
1123
1124 mutex_lock(&ks->lock);
1125 result = ks8851_rdreg16(ks, ksreg);
1126 mutex_unlock(&ks->lock);
1127
1128 return result;
1129}
1130
1131static void ks8851_phy_write(struct net_device *dev,
1132 int phy, int reg, int value)
1133{
1134 struct ks8851_net *ks = netdev_priv(dev);
1135 int ksreg;
1136
1137 ksreg = ks8851_phy_reg(reg);
1138 if (ksreg) {
1139 mutex_lock(&ks->lock);
1140 ks8851_wrreg16(ks, ksreg, value);
1141 mutex_unlock(&ks->lock);
1142 }
1143}
1144
1145/**
1146 * ks8851_read_selftest - read the selftest memory info.
1147 * @ks: The device state
1148 *
1149 * Read and check the TX/RX memory selftest information.
1150 */
1151static int ks8851_read_selftest(struct ks8851_net *ks)
1152{
1153 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1154 int ret = 0;
1155 unsigned rd;
1156
1157 rd = ks8851_rdreg16(ks, KS_MBIR);
1158
1159 if ((rd & both_done) != both_done) {
1160 ks_warn(ks, "Memory selftest not finished\n");
1161 return 0;
1162 }
1163
1164 if (rd & MBIR_TXMBFA) {
1165 ks_err(ks, "TX memory selftest fail\n");
1166 ret |= 1;
1167 }
1168
1169 if (rd & MBIR_RXMBFA) {
1170 ks_err(ks, "RX memory selftest fail\n");
1171 ret |= 2;
1172 }
1173
1174 return ret;
1175}
1176
1177/* driver bus management functions */
1178
1179static int __devinit ks8851_probe(struct spi_device *spi)
1180{
1181 struct net_device *ndev;
1182 struct ks8851_net *ks;
1183 int ret;
1184
1185 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1186 if (!ndev) {
1187 dev_err(&spi->dev, "failed to alloc ethernet device\n");
1188 return -ENOMEM;
1189 }
1190
1191 spi->bits_per_word = 8;
1192
1193 ks = netdev_priv(ndev);
1194
1195 ks->netdev = ndev;
1196 ks->spidev = spi;
1197 ks->tx_space = 6144;
1198
1199 mutex_init(&ks->lock);
1200 spin_lock_init(&ks->statelock);
1201
1202 INIT_WORK(&ks->tx_work, ks8851_tx_work);
1203 INIT_WORK(&ks->irq_work, ks8851_irq_work);
1204 INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
1205
1206 /* initialise pre-made spi transfer messages */
1207
1208 spi_message_init(&ks->spi_msg1);
1209 spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1);
1210
1211 spi_message_init(&ks->spi_msg2);
1212 spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
1213 spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
1214
1215 /* setup mii state */
1216 ks->mii.dev = ndev;
1217 ks->mii.phy_id = 1;
1218 ks->mii.phy_id_mask = 1;
1219 ks->mii.reg_num_mask = 0xf;
1220 ks->mii.mdio_read = ks8851_phy_read;
1221 ks->mii.mdio_write = ks8851_phy_write;
1222
1223 dev_info(&spi->dev, "message enable is %d\n", msg_enable);
1224
1225 /* set the default message enable */
1226 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1227 NETIF_MSG_PROBE |
1228 NETIF_MSG_LINK));
1229
1230 skb_queue_head_init(&ks->txq);
1231
1232 SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
1233 SET_NETDEV_DEV(ndev, &spi->dev);
1234
1235 dev_set_drvdata(&spi->dev, ks);
1236
1237 ndev->if_port = IF_PORT_100BASET;
1238 ndev->netdev_ops = &ks8851_netdev_ops;
1239 ndev->irq = spi->irq;
1240
1241 /* simple check for a valid chip being connected to the bus */
1242
1243 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1244 dev_err(&spi->dev, "failed to read device ID\n");
1245 ret = -ENODEV;
1246 goto err_id;
1247 }
1248
1249 ks8851_read_selftest(ks);
1250 ks8851_init_mac(ks);
1251
1252 ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW,
1253 ndev->name, ks);
1254 if (ret < 0) {
1255 dev_err(&spi->dev, "failed to get irq\n");
1256 goto err_irq;
1257 }
1258
1259 ret = register_netdev(ndev);
1260 if (ret) {
1261 dev_err(&spi->dev, "failed to register network device\n");
1262 goto err_netdev;
1263 }
1264
1265 dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n",
1266 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
1267 ndev->dev_addr, ndev->irq);
1268
1269 return 0;
1270
1271
1272err_netdev:
1273 free_irq(ndev->irq, ndev);
1274
1275err_id:
1276err_irq:
1277 free_netdev(ndev);
1278 return ret;
1279}
1280
1281static int __devexit ks8851_remove(struct spi_device *spi)
1282{
1283 struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
1284
1285 if (netif_msg_drv(priv))
1286 dev_info(&spi->dev, "remove");
1287
1288 unregister_netdev(priv->netdev);
1289 free_irq(spi->irq, priv);
1290 free_netdev(priv->netdev);
1291
1292 return 0;
1293}
1294
1295static struct spi_driver ks8851_driver = {
1296 .driver = {
1297 .name = "ks8851",
1298 .owner = THIS_MODULE,
1299 },
1300 .probe = ks8851_probe,
1301 .remove = __devexit_p(ks8851_remove),
1302};
1303
1304static int __init ks8851_init(void)
1305{
1306 return spi_register_driver(&ks8851_driver);
1307}
1308
1309static void __exit ks8851_exit(void)
1310{
1311 spi_unregister_driver(&ks8851_driver);
1312}
1313
1314module_init(ks8851_init);
1315module_exit(ks8851_exit);
1316
1317MODULE_DESCRIPTION("KS8851 Network driver");
1318MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
1319MODULE_LICENSE("GPL");
1320
1321module_param_named(message, msg_enable, int, 0);
1322MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
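The driver takes no platform data yet, so a board only needs to describe the chip select and interrupt line it has wired up. A minimal board-file sketch, assuming the chip sits on SPI bus 0, chip select 0; the bus number, clock rate and IRQ number are all board-specific guesses:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info ks8851_board_info[] __initdata = {
	{
		.modalias	= "ks8851",	/* must match ks8851_driver.driver.name */
		.bus_num	= 0,		/* assumption: chip on SPI bus 0 */
		.chip_select	= 0,
		.max_speed_hz	= 24 * 1000 * 1000, /* assumption: board dependent */
		.mode		= SPI_MODE_0,
		.irq		= 123,		/* assumption: hypothetical IRQ number */
	},
};

static void __init board_register_ks8851(void)
{
	/* normally called from the machine's init_machine callback */
	spi_register_board_info(ks8851_board_info, ARRAY_SIZE(ks8851_board_info));
}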
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
new file mode 100644
index 000000000000..85abe147afbf
--- /dev/null
+++ b/drivers/net/ks8851.h
@@ -0,0 +1,296 @@
1/* drivers/net/ks8851.h
2 *
3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * KS8851 register definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#define KS_CCR 0x08
14#define CCR_EEPROM (1 << 9)
15#define CCR_SPI (1 << 8)
16#define CCR_32PIN (1 << 0)
17
18/* MAC address registers */
19#define KS_MARL 0x10
20#define KS_MARM 0x12
21#define KS_MARH 0x14
22
23#define KS_OBCR 0x20
24#define OBCR_ODS_16mA (1 << 6)
25
26#define KS_EEPCR 0x22
27#define EEPCR_EESA (1 << 4)
28#define EEPCR_EESB (1 << 3)
29#define EEPCR_EEDO (1 << 2)
30#define EEPCR_EESCK (1 << 1)
31#define EEPCR_EECS (1 << 0)
32
33#define KS_MBIR 0x24
34#define MBIR_TXMBF (1 << 12)
35#define MBIR_TXMBFA (1 << 11)
36#define MBIR_RXMBF (1 << 4)
37#define MBIR_RXMBFA (1 << 3)
38
39#define KS_GRR 0x26
40#define GRR_QMU (1 << 1)
41#define GRR_GSR (1 << 0)
42
43#define KS_WFCR 0x2A
44#define WFCR_MPRXE (1 << 7)
45#define WFCR_WF3E (1 << 3)
46#define WFCR_WF2E (1 << 2)
47#define WFCR_WF1E (1 << 1)
48#define WFCR_WF0E (1 << 0)
49
50#define KS_WF0CRC0 0x30
51#define KS_WF0CRC1 0x32
52#define KS_WF0BM0 0x34
53#define KS_WF0BM1 0x36
54#define KS_WF0BM2 0x38
55#define KS_WF0BM3 0x3A
56
57#define KS_WF1CRC0 0x40
58#define KS_WF1CRC1 0x42
59#define KS_WF1BM0 0x44
60#define KS_WF1BM1 0x46
61#define KS_WF1BM2 0x48
62#define KS_WF1BM3 0x4A
63
64#define KS_WF2CRC0 0x50
65#define KS_WF2CRC1 0x52
66#define KS_WF2BM0 0x54
67#define KS_WF2BM1 0x56
68#define KS_WF2BM2 0x58
69#define KS_WF2BM3 0x5A
70
71#define KS_WF3CRC0 0x60
72#define KS_WF3CRC1 0x62
73#define KS_WF3BM0 0x64
74#define KS_WF3BM1 0x66
75#define KS_WF3BM2 0x68
76#define KS_WF3BM3 0x6A
77
78#define KS_TXCR 0x70
79#define TXCR_TCGICMP (1 << 8)
80#define TXCR_TCGUDP (1 << 7)
81#define TXCR_TCGTCP (1 << 6)
82#define TXCR_TCGIP (1 << 5)
83#define TXCR_FTXQ (1 << 4)
84#define TXCR_TXFCE (1 << 3)
85#define TXCR_TXPE (1 << 2)
86#define TXCR_TXCRC (1 << 1)
87#define TXCR_TXE (1 << 0)
88
89#define KS_TXSR 0x72
90#define TXSR_TXLC (1 << 13)
91#define TXSR_TXMC (1 << 12)
92#define TXSR_TXFID_MASK (0x3f << 0)
93#define TXSR_TXFID_SHIFT (0)
94#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
95
96#define KS_RXCR1 0x74
97#define RXCR1_FRXQ (1 << 15)
98#define RXCR1_RXUDPFCC (1 << 14)
99#define RXCR1_RXTCPFCC (1 << 13)
100#define RXCR1_RXIPFCC (1 << 12)
101#define RXCR1_RXPAFMA (1 << 11)
102#define RXCR1_RXFCE (1 << 10)
103#define RXCR1_RXEFE (1 << 9)
104#define RXCR1_RXMAFMA (1 << 8)
105#define RXCR1_RXBE (1 << 7)
106#define RXCR1_RXME (1 << 6)
107#define RXCR1_RXUE (1 << 5)
108#define RXCR1_RXAE (1 << 4)
109#define RXCR1_RXINVF (1 << 1)
110#define RXCR1_RXE (1 << 0)
111
112#define KS_RXCR2 0x76
113#define RXCR2_SRDBL_MASK (0x7 << 5)
114#define RXCR2_SRDBL_SHIFT (5)
115#define RXCR2_SRDBL_4B (0x0 << 5)
116#define RXCR2_SRDBL_8B (0x1 << 5)
117#define RXCR2_SRDBL_16B (0x2 << 5)
118#define RXCR2_SRDBL_32B (0x3 << 5)
119#define RXCR2_SRDBL_FRAME (0x4 << 5)
120#define RXCR2_IUFFP (1 << 4)
121#define RXCR2_RXIUFCEZ (1 << 3)
122#define RXCR2_UDPLFE (1 << 2)
123#define RXCR2_RXICMPFCC (1 << 1)
124#define RXCR2_RXSAF (1 << 0)
125
126#define KS_TXMIR 0x78
127
128#define KS_RXFHSR 0x7C
129#define RXFSHR_RXFV (1 << 15)
130#define RXFSHR_RXICMPFCS (1 << 13)
131#define RXFSHR_RXIPFCS (1 << 12)
132#define RXFSHR_RXTCPFCS (1 << 11)
133#define RXFSHR_RXUDPFCS (1 << 10)
134#define RXFSHR_RXBF (1 << 7)
135#define RXFSHR_RXMF (1 << 6)
136#define RXFSHR_RXUF (1 << 5)
137#define RXFSHR_RXMR (1 << 4)
138#define RXFSHR_RXFT (1 << 3)
139#define RXFSHR_RXFTL (1 << 2)
140#define RXFSHR_RXRF (1 << 1)
141#define RXFSHR_RXCE (1 << 0)
142
143#define KS_RXFHBCR 0x7E
144#define KS_TXQCR 0x80
145#define TXQCR_AETFE (1 << 2)
146#define TXQCR_TXQMAM (1 << 1)
147#define TXQCR_METFE (1 << 0)
148
149#define KS_RXQCR 0x82
150#define RXQCR_RXDTTS (1 << 12)
151#define RXQCR_RXDBCTS (1 << 11)
152#define RXQCR_RXFCTS (1 << 10)
153#define RXQCR_RXIPHTOE (1 << 9)
154#define RXQCR_RXDTTE (1 << 7)
155#define RXQCR_RXDBCTE (1 << 6)
156#define RXQCR_RXFCTE (1 << 5)
157#define RXQCR_ADRFE (1 << 4)
158#define RXQCR_SDA (1 << 3)
159#define RXQCR_RRXEF (1 << 0)
160
161#define KS_TXFDPR 0x84
162#define TXFDPR_TXFPAI (1 << 14)
163#define TXFDPR_TXFP_MASK (0x7ff << 0)
164#define TXFDPR_TXFP_SHIFT (0)
165
166#define KS_RXFDPR 0x86
167#define RXFDPR_RXFPAI (1 << 14)
168
169#define KS_RXDTTR 0x8C
170#define KS_RXDBCTR 0x8E
171
172#define KS_IER 0x90
173#define KS_ISR 0x92
174#define IRQ_LCI (1 << 15)
175#define IRQ_TXI (1 << 14)
176#define IRQ_RXI (1 << 13)
177#define IRQ_RXOI (1 << 11)
178#define IRQ_TXPSI (1 << 9)
179#define IRQ_RXPSI (1 << 8)
180#define IRQ_TXSAI (1 << 6)
181#define IRQ_RXWFDI (1 << 5)
182#define IRQ_RXMPDI (1 << 4)
183#define IRQ_LDI (1 << 3)
184#define IRQ_EDI (1 << 2)
185#define IRQ_SPIBEI (1 << 1)
186#define IRQ_DEDI (1 << 0)
187
188#define KS_RXFCTR 0x9C
189#define KS_RXFC 0x9D
190#define RXFCTR_RXFC_MASK (0xff << 8)
191#define RXFCTR_RXFC_SHIFT (8)
192#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
193#define RXFCTR_RXFCT_MASK (0xff << 0)
194#define RXFCTR_RXFCT_SHIFT (0)
195
196#define KS_TXNTFSR 0x9E
197
198#define KS_MAHTR0 0xA0
199#define KS_MAHTR1 0xA2
200#define KS_MAHTR2 0xA4
201#define KS_MAHTR3 0xA6
202
203#define KS_FCLWR 0xB0
204#define KS_FCHWR 0xB2
205#define KS_FCOWR 0xB4
206
207#define KS_CIDER 0xC0
208#define CIDER_ID 0x8870
209#define CIDER_REV_MASK (0x7 << 1)
210#define CIDER_REV_SHIFT (1)
211#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
212
213#define KS_CGCR 0xC6
214
215#define KS_IACR 0xC8
216#define IACR_RDEN (1 << 12)
217#define IACR_TSEL_MASK (0x3 << 10)
218#define IACR_TSEL_SHIFT (10)
219#define IACR_TSEL_MIB (0x3 << 10)
220#define IACR_ADDR_MASK (0x1f << 0)
221#define IACR_ADDR_SHIFT (0)
222
223#define KS_IADLR 0xD0
224#define KS_IAHDR 0xD2
225
226#define KS_PMECR 0xD4
227#define PMECR_PME_DELAY (1 << 14)
228#define PMECR_PME_POL (1 << 12)
229#define PMECR_WOL_WAKEUP (1 << 11)
230#define PMECR_WOL_MAGICPKT (1 << 10)
231#define PMECR_WOL_LINKUP (1 << 9)
232#define PMECR_WOL_ENERGY (1 << 8)
233#define PMECR_AUTO_WAKE_EN (1 << 7)
234#define PMECR_WAKEUP_NORMAL (1 << 6)
235#define PMECR_WKEVT_MASK (0xf << 2)
236#define PMECR_WKEVT_SHIFT (2)
237#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
238#define PMECR_WKEVT_ENERGY (0x1 << 2)
239#define PMECR_WKEVT_LINK (0x2 << 2)
240#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
241#define PMECR_WKEVT_FRAME (0x8 << 2)
242#define PMECR_PM_MASK (0x3 << 0)
243#define PMECR_PM_SHIFT (0)
244#define PMECR_PM_NORMAL (0x0 << 0)
245#define PMECR_PM_ENERGY (0x1 << 0)
246#define PMECR_PM_SOFTDOWN (0x2 << 0)
247#define PMECR_PM_POWERSAVE (0x3 << 0)
248
249/* Standard MII PHY data */
250#define KS_P1MBCR 0xE4
251#define KS_P1MBSR 0xE6
252#define KS_PHY1ILR 0xE8
253#define KS_PHY1IHR 0xEA
254#define KS_P1ANAR 0xEC
255#define KS_P1ANLPR 0xEE
256
257#define KS_P1SCLMD 0xF4
258#define P1SCLMD_LEDOFF (1 << 15)
259#define P1SCLMD_TXIDS (1 << 14)
260#define P1SCLMD_RESTARTAN (1 << 13)
261#define P1SCLMD_DISAUTOMDIX (1 << 10)
262#define P1SCLMD_FORCEMDIX (1 << 9)
263#define P1SCLMD_AUTONEGEN (1 << 7)
264#define P1SCLMD_FORCE100 (1 << 6)
265#define P1SCLMD_FORCEFDX (1 << 5)
266#define P1SCLMD_ADV_FLOW (1 << 4)
267#define P1SCLMD_ADV_100BT_FDX (1 << 3)
268#define P1SCLMD_ADV_100BT_HDX (1 << 2)
269#define P1SCLMD_ADV_10BT_FDX (1 << 1)
270#define P1SCLMD_ADV_10BT_HDX (1 << 0)
271
272#define KS_P1CR 0xF6
273#define P1CR_HP_MDIX (1 << 15)
274#define P1CR_REV_POL (1 << 13)
275#define P1CR_OP_100M (1 << 10)
276#define P1CR_OP_FDX (1 << 9)
277#define P1CR_OP_MDI (1 << 7)
278#define P1CR_AN_DONE (1 << 6)
279#define P1CR_LINK_GOOD (1 << 5)
280#define P1CR_PNTR_FLOW (1 << 4)
281#define P1CR_PNTR_100BT_FDX (1 << 3)
282#define P1CR_PNTR_100BT_HDX (1 << 2)
283#define P1CR_PNTR_10BT_FDX (1 << 1)
284#define P1CR_PNTR_10BT_HDX (1 << 0)
285
286/* TX Frame control */
287
288#define TXFR_TXIC (1 << 15)
289#define TXFR_TXFID_MASK (0x3f << 0)
290#define TXFR_TXFID_SHIFT (0)
291
292/* SPI frame opcodes */
293#define KS_SPIOP_RD (0x00)
294#define KS_SPIOP_WR (0x40)
295#define KS_SPIOP_RXFIFO (0x80)
296#define KS_SPIOP_TXFIFO (0xC0)
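ks8851_rx_pkts() in the driver above currently accepts whatever frame status the chip reports; the RXFHSR bits are what a stricter receive path could check before copying a frame out of the FIFO. A hedged sketch with a hypothetical helper name, not part of the driver:

/* sketch: return true if a KS_RXFHSR value describes a usable frame */
static bool ks8851_rxhdr_ok(unsigned rxstat)
{
	if (!(rxstat & RXFSHR_RXFV))		/* frame valid bit must be set */
		return false;

	/* reject CRC, runt, too-long and MII error frames */
	if (rxstat & (RXFSHR_RXCE | RXFSHR_RXRF | RXFSHR_RXFTL | RXFSHR_RXMR))
		return false;

	return true;
}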
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index acd143da161d..61eabcac734c 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -179,7 +179,7 @@ static const struct net_device_ops macsonic_netdev_ops = {
179 .ndo_set_mac_address = eth_mac_addr, 179 .ndo_set_mac_address = eth_mac_addr,
180}; 180};
181 181
182static int __init macsonic_init(struct net_device *dev) 182static int __devinit macsonic_init(struct net_device *dev)
183{ 183{
184 struct sonic_local* lp = netdev_priv(dev); 184 struct sonic_local* lp = netdev_priv(dev);
185 185
@@ -223,7 +223,7 @@ static int __init macsonic_init(struct net_device *dev)
223 return 0; 223 return 0;
224} 224}
225 225
226static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev) 226static int __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
227{ 227{
228 struct sonic_local *lp = netdev_priv(dev); 228 struct sonic_local *lp = netdev_priv(dev);
229 const int prom_addr = ONBOARD_SONIC_PROM_BASE; 229 const int prom_addr = ONBOARD_SONIC_PROM_BASE;
@@ -288,7 +288,7 @@ static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev)
288 } else return 0; 288 } else return 0;
289} 289}
290 290
291static int __init mac_onboard_sonic_probe(struct net_device *dev) 291static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
292{ 292{
293 /* Bwahahaha */ 293 /* Bwahahaha */
294 static int once_is_more_than_enough; 294 static int once_is_more_than_enough;
@@ -409,7 +409,7 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev)
409 return macsonic_init(dev); 409 return macsonic_init(dev);
410} 410}
411 411
412static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev, 412static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
413 unsigned long prom_addr, 413 unsigned long prom_addr,
414 int id) 414 int id)
415{ 415{
@@ -424,7 +424,7 @@ static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev,
424 return 0; 424 return 0;
425} 425}
426 426
427static int __init macsonic_ident(struct nubus_dev *ndev) 427static int __devinit macsonic_ident(struct nubus_dev *ndev)
428{ 428{
429 if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC && 429 if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
430 ndev->dr_sw == NUBUS_DRSW_SONIC_LC) 430 ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
@@ -449,7 +449,7 @@ static int __init macsonic_ident(struct nubus_dev *ndev)
449 return -1; 449 return -1;
450} 450}
451 451
452static int __init mac_nubus_sonic_probe(struct net_device *dev) 452static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
453{ 453{
454 static int slots; 454 static int slots;
455 struct nubus_dev* ndev = NULL; 455 struct nubus_dev* ndev = NULL;
@@ -562,7 +562,7 @@ static int __init mac_nubus_sonic_probe(struct net_device *dev)
562 return macsonic_init(dev); 562 return macsonic_init(dev);
563} 563}
564 564
565static int __init mac_sonic_probe(struct platform_device *pdev) 565static int __devinit mac_sonic_probe(struct platform_device *pdev)
566{ 566{
567 struct net_device *dev; 567 struct net_device *dev;
568 struct sonic_local *lp; 568 struct sonic_local *lp;
@@ -575,6 +575,7 @@ static int __init mac_sonic_probe(struct platform_device *pdev)
575 lp = netdev_priv(dev); 575 lp = netdev_priv(dev);
576 lp->device = &pdev->dev; 576 lp->device = &pdev->dev;
577 SET_NETDEV_DEV(dev, &pdev->dev); 577 SET_NETDEV_DEV(dev, &pdev->dev);
578 platform_set_drvdata(pdev, dev);
578 579
579 /* This will catch fatal stuff like -ENOMEM as well as success */ 580 /* This will catch fatal stuff like -ENOMEM as well as success */
580 err = mac_onboard_sonic_probe(dev); 581 err = mac_onboard_sonic_probe(dev);
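Stashing the net_device in the platform device here is presumably what the driver-model unbind path needs in order to find the device again. A sketch, with a hypothetical function name, of the counterpart lookup a remove callback relying on this would perform:

static int __devexit sonic_platform_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	/* per-driver resource teardown would happen here */
	free_netdev(dev);
	return 0;
}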
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index dc45e9856c35..6851bdb2ce29 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -14,6 +14,10 @@
14#include <linux/mdio.h> 14#include <linux/mdio.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16
17MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers");
18MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc.");
19MODULE_LICENSE("GPL");
20
17/** 21/**
18 * mdio45_probe - probe for an MDIO (clause 45) device 22 * mdio45_probe - probe for an MDIO (clause 45) device
19 * @mdio: MDIO interface 23 * @mdio: MDIO interface
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 2845a0560b84..65ec77dc31f5 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -80,7 +80,9 @@ enum {
80 /* Bad management packet (silently discarded): */ 80 /* Bad management packet (silently discarded): */
81 CMD_STAT_BAD_PKT = 0x30, 81 CMD_STAT_BAD_PKT = 0x30,
82 /* More outstanding CQEs in CQ than new CQ size: */ 82 /* More outstanding CQEs in CQ than new CQ size: */
83 CMD_STAT_BAD_SIZE = 0x40 83 CMD_STAT_BAD_SIZE = 0x40,
84 /* Multi Function device support required: */
85 CMD_STAT_MULTI_FUNC_REQ = 0x50,
84}; 86};
85 87
86enum { 88enum {
@@ -128,6 +130,7 @@ static int mlx4_status_to_errno(u8 status)
128 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, 130 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
129 [CMD_STAT_BAD_PKT] = -EINVAL, 131 [CMD_STAT_BAD_PKT] = -EINVAL,
130 [CMD_STAT_BAD_SIZE] = -ENOMEM, 132 [CMD_STAT_BAD_SIZE] = -ENOMEM,
133 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
131 }; 134 };
132 135
133 if (status >= ARRAY_SIZE(trans_table) || 136 if (status >= ARRAY_SIZE(trans_table) ||
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 091f99052c91..86467b444ac6 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -220,7 +220,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
220{ 220{
221 cmd->autoneg = AUTONEG_DISABLE; 221 cmd->autoneg = AUTONEG_DISABLE;
222 cmd->supported = SUPPORTED_10000baseT_Full; 222 cmd->supported = SUPPORTED_10000baseT_Full;
223 cmd->advertising = SUPPORTED_10000baseT_Full; 223 cmd->advertising = ADVERTISED_10000baseT_Full;
224 if (netif_carrier_ok(dev)) { 224 if (netif_carrier_ok(dev)) {
225 cmd->speed = SPEED_10000; 225 cmd->speed = SPEED_10000;
226 cmd->duplex = DUPLEX_FULL; 226 cmd->duplex = DUPLEX_FULL;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 08c43f2ae72b..5a88b3f57693 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -249,6 +249,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
249 pci_unmap_page(mdev->pdev, 249 pci_unmap_page(mdev->pdev,
250 (dma_addr_t) be64_to_cpu(data->addr), 250 (dma_addr_t) be64_to_cpu(data->addr),
251 frag->size, PCI_DMA_TODEVICE); 251 frag->size, PCI_DMA_TODEVICE);
252 ++data;
252 } 253 }
253 } 254 }
254 /* Stamp the freed descriptor */ 255 /* Stamp the freed descriptor */
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 018348c01193..dac621b1e9fc 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -729,7 +729,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
729 729
730 err = mlx4_QUERY_FW(dev); 730 err = mlx4_QUERY_FW(dev);
731 if (err) { 731 if (err) {
732 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 732 if (err == -EACCES)
733 mlx4_info(dev, "non-primary physical function, skipping.\n");
734 else
735 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
733 return err; 736 return err;
734 } 737 }
735 738
@@ -1285,6 +1288,7 @@ static struct pci_device_id mlx4_pci_table[] = {
1285 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 1288 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
1286 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 1289 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1287 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 1290 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1291 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1288 { 0, } 1292 { 0, }
1289}; 1293};
1290 1294
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index c9bfe4eea189..78c088331f57 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -130,8 +130,8 @@ static int full_duplex[MAX_UNITS];
130static const char version[] __devinitconst = 130static const char version[] __devinitconst =
131 KERN_INFO DRV_NAME " dp8381x driver, version " 131 KERN_INFO DRV_NAME " dp8381x driver, version "
132 DRV_VERSION ", " DRV_RELDATE "\n" 132 DRV_VERSION ", " DRV_RELDATE "\n"
133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" 133 " originally by Donald Becker <becker@scyld.com>\n"
134 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; 134 " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
135 135
136MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 136MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
137MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); 137MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 5c3e242428f1..992dbfffdb05 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -321,7 +321,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
321 } 321 }
322 322
323 if (ei_debug && version_printed++ == 0) 323 if (ei_debug && version_printed++ == 0)
324 printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2); 324 printk(KERN_INFO "%s%s", version1, version2);
325 325
326 printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr); 326 printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
327 327
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 970cedeb5f37..f86e05047d19 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -60,7 +60,18 @@
60#define _NETXEN_NIC_LINUX_SUBVERSION 30 60#define _NETXEN_NIC_LINUX_SUBVERSION 30
61#define NETXEN_NIC_LINUX_VERSIONID "4.0.30" 61#define NETXEN_NIC_LINUX_VERSIONID "4.0.30"
62 62
63#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) 63#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
64#define _major(v) (((v) >> 24) & 0xff)
65#define _minor(v) (((v) >> 16) & 0xff)
66#define _build(v) ((v) & 0xffff)
67
68/* version in image has weird encoding:
69 * 7:0 - major
70 * 15:8 - minor
71 * 31:16 - build (little endian)
72 */
73#define NETXEN_DECODE_VERSION(v) \
74 NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
64 75
65#define NETXEN_NUM_FLASH_SECTORS (64) 76#define NETXEN_NUM_FLASH_SECTORS (64)
66#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) 77#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
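A worked example of the decode, using a hypothetical 4.0.30 image version word:

/*
 * Sketch: a 4.0.30 firmware image stores its version as
 *   v = (30 << 16) | (0 << 8) | 4 = 0x001e0004
 * NETXEN_DECODE_VERSION(v)
 *   = NETXEN_VERSION_CODE(0x04, 0x00, 0x001e)
 *   = (4 << 24) + (0 << 16) + 30 = 0x0400001e
 * so _major() = 4, _minor() = 0, _build() = 30.
 */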
@@ -199,6 +210,7 @@
199#define NETXEN_CTX_SIGNATURE 0xdee0 210#define NETXEN_CTX_SIGNATURE 0xdee0
200#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0 211#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0
201#define NETXEN_CTX_RESET 0xbad0 212#define NETXEN_CTX_RESET 0xbad0
213#define NETXEN_CTX_D3_RESET 0xacc0
202#define NETXEN_RCV_PRODUCER(ringid) (ringid) 214#define NETXEN_RCV_PRODUCER(ringid) (ringid)
203 215
204#define PHAN_PEG_RCV_INITIALIZED 0xff01 216#define PHAN_PEG_RCV_INITIALIZED 0xff01
@@ -614,6 +626,7 @@ struct netxen_new_user_info {
614#define NX_P2_MN_ROMIMAGE 0 626#define NX_P2_MN_ROMIMAGE 0
615#define NX_P3_CT_ROMIMAGE 1 627#define NX_P3_CT_ROMIMAGE 1
616#define NX_P3_MN_ROMIMAGE 2 628#define NX_P3_MN_ROMIMAGE 2
629#define NX_FLASH_ROMIMAGE 3
617 630
618#define NETXEN_USER_START_OLD NETXEN_PXE_START /* for backward compatibility */ 631#define NETXEN_USER_START_OLD NETXEN_PXE_START /* for backward compatibility */
619 632
@@ -761,6 +774,8 @@ struct nx_host_tx_ring {
761 u32 crb_cmd_consumer; 774 u32 crb_cmd_consumer;
762 u32 num_desc; 775 u32 num_desc;
763 776
777 struct netdev_queue *txq;
778
764 struct netxen_cmd_buffer *cmd_buf_arr; 779 struct netxen_cmd_buffer *cmd_buf_arr;
765 struct cmd_desc_type0 *desc_head; 780 struct cmd_desc_type0 *desc_head;
766 dma_addr_t phys_addr; 781 dma_addr_t phys_addr;
@@ -1243,7 +1258,7 @@ struct netxen_adapter {
1243 u32 resv3; 1258 u32 resv3;
1244 1259
1245 u8 has_link_events; 1260 u8 has_link_events;
1246 u8 resv1; 1261 u8 fw_type;
1247 u16 tx_context_id; 1262 u16 tx_context_id;
1248 u16 mtu; 1263 u16 mtu;
1249 u16 is_up; 1264 u16 is_up;
@@ -1387,6 +1402,7 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter);
1387int netxen_initialize_adapter_offload(struct netxen_adapter *adapter); 1402int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
1388int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); 1403int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
1389int netxen_load_firmware(struct netxen_adapter *adapter); 1404int netxen_load_firmware(struct netxen_adapter *adapter);
1405int netxen_need_fw_reset(struct netxen_adapter *adapter);
1390void netxen_request_firmware(struct netxen_adapter *adapter); 1406void netxen_request_firmware(struct netxen_adapter *adapter);
1391void netxen_release_firmware(struct netxen_adapter *adapter); 1407void netxen_release_firmware(struct netxen_adapter *adapter);
1392int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1408int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
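
For reference, the reworked macros above pack a version as (major << 24) | (minor << 16) | build, while the word stored in the flash image keeps major in bits 7:0, minor in bits 15:8 and build in bits 31:16; NETXEN_DECODE_VERSION converts the flash layout into the comparable form. A small stand-alone sketch of the same arithmetic (the sample flash word is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define VERSION_CODE(a, b, c)  (((a) << 24) + ((b) << 16) + (c))
    #define DECODE_VERSION(v)      VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))

    int main(void)
    {
            uint32_t raw = 0x00d80004;          /* hypothetical flash word: major 4, minor 0, build 216 */
            uint32_t ver = DECODE_VERSION(raw); /* becomes 0x040000d8, directly comparable */

            printf("%u.%u.%u\n",
                   (unsigned)((ver >> 24) & 0xff),
                   (unsigned)((ver >> 16) & 0xff),
                   (unsigned)(ver & 0xffff));   /* prints 4.0.216 */
            return 0;
    }
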
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 4754f5cffad0..9f8ae4719e2f 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -684,10 +684,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
684 goto err_out_free; 684 goto err_out_free;
685 } else { 685 } else {
686 err = netxen_init_old_ctx(adapter); 686 err = netxen_init_old_ctx(adapter);
687 if (err) { 687 if (err)
688 netxen_free_hw_resources(adapter); 688 goto err_out_free;
689 return err;
690 }
691 } 689 }
692 690
693 return 0; 691 return 0;
@@ -708,15 +706,18 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
708 int port = adapter->portnum; 706 int port = adapter->portnum;
709 707
710 if (adapter->fw_major >= 4) { 708 if (adapter->fw_major >= 4) {
711 nx_fw_cmd_destroy_tx_ctx(adapter);
712 nx_fw_cmd_destroy_rx_ctx(adapter); 709 nx_fw_cmd_destroy_rx_ctx(adapter);
710 nx_fw_cmd_destroy_tx_ctx(adapter);
713 } else { 711 } else {
714 netxen_api_lock(adapter); 712 netxen_api_lock(adapter);
715 NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), 713 NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
716 NETXEN_CTX_RESET | port); 714 NETXEN_CTX_D3_RESET | port);
717 netxen_api_unlock(adapter); 715 netxen_api_unlock(adapter);
718 } 716 }
719 717
718 /* Allow dma queues to drain after context reset */
719 msleep(20);
720
720 recv_ctx = &adapter->recv_ctx; 721 recv_ctx = &adapter->recv_ctx;
721 722
722 if (recv_ctx->hwctx != NULL) { 723 if (recv_ctx->hwctx != NULL) {
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 3cc047844af3..824103675648 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -853,6 +853,7 @@ enum {
853#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c)) 853#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
854 854
855#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) 855#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
856#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0))
856 857
857#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 858#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
858#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) 859#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index ce3b89d2cbb6..b9123d445c96 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -461,13 +461,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
461 i = 0; 461 i = 0;
462 462
463 tx_ring = adapter->tx_ring; 463 tx_ring = adapter->tx_ring;
464 netif_tx_lock_bh(adapter->netdev); 464 __netif_tx_lock_bh(tx_ring->txq);
465 465
466 producer = tx_ring->producer; 466 producer = tx_ring->producer;
467 consumer = tx_ring->sw_consumer; 467 consumer = tx_ring->sw_consumer;
468 468
469 if (nr_desc >= find_diff_among(producer, consumer, tx_ring->num_desc)) { 469 if (nr_desc >= netxen_tx_avail(tx_ring)) {
470 netif_tx_unlock_bh(adapter->netdev); 470 netif_tx_stop_queue(tx_ring->txq);
471 __netif_tx_unlock_bh(tx_ring->txq);
471 return -EBUSY; 472 return -EBUSY;
472 } 473 }
473 474
@@ -490,7 +491,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
490 491
491 netxen_nic_update_cmd_producer(adapter, tx_ring); 492 netxen_nic_update_cmd_producer(adapter, tx_ring);
492 493
493 netif_tx_unlock_bh(adapter->netdev); 494 __netif_tx_unlock_bh(tx_ring->txq);
494 495
495 return 0; 496 return 0;
496} 497}
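
The hunk above switches netxen_send_cmd_descs() from the device-wide netif_tx_lock_bh() to the per-queue lock of the TX queue cached in tx_ring->txq, and stops that queue when descriptors run out so the stack backs off. A condensed sketch of the pattern, not the full driver code; enough_descriptors() stands in for the driver's availability check:

    struct netdev_queue *txq = netdev_get_tx_queue(netdev, 0);

    __netif_tx_lock_bh(txq);                /* serialise against ndo_start_xmit on this queue */
    if (!enough_descriptors(tx_ring)) {     /* placeholder for the ring-space test */
            netif_tx_stop_queue(txq);       /* tell the stack to stop feeding this ring */
            __netif_tx_unlock_bh(txq);
            return -EBUSY;
    }
    /* ... post command descriptors, update the producer index ... */
    __netif_tx_unlock_bh(txq);
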
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 055bb61d6e77..7acf204e38c9 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -184,6 +184,13 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
184 kfree(recv_ctx->rds_rings); 184 kfree(recv_ctx->rds_rings);
185 185
186skip_rds: 186skip_rds:
187 if (recv_ctx->sds_rings == NULL)
188 goto skip_sds;
189
190 for(ring = 0; ring < adapter->max_sds_rings; ring++)
191 recv_ctx->sds_rings[ring].consumer = 0;
192
193skip_sds:
187 if (adapter->tx_ring == NULL) 194 if (adapter->tx_ring == NULL)
188 return; 195 return;
189 196
@@ -214,6 +221,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
214 adapter->tx_ring = tx_ring; 221 adapter->tx_ring = tx_ring;
215 222
216 tx_ring->num_desc = adapter->num_txd; 223 tx_ring->num_desc = adapter->num_txd;
224 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
217 225
218 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 226 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
219 if (cmd_buf_arr == NULL) { 227 if (cmd_buf_arr == NULL) {
@@ -684,11 +692,84 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
684} 692}
685 693
686int 694int
695netxen_need_fw_reset(struct netxen_adapter *adapter)
696{
697 u32 count, old_count;
698 u32 val, version, major, minor, build;
699 int i, timeout;
700 u8 fw_type;
701
 702 /* NX2031 firmware doesn't support heartbeat */
703 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
704 return 1;
705
706 /* last attempt had failed */
707 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
708 return 1;
709
710 old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
711
712 for (i = 0; i < 10; i++) {
713
714 timeout = msleep_interruptible(200);
715 if (timeout) {
716 NXWR32(adapter, CRB_CMDPEG_STATE,
717 PHAN_INITIALIZE_FAILED);
718 return -EINTR;
719 }
720
721 count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
722 if (count != old_count)
723 break;
724 }
725
726 /* firmware is dead */
727 if (count == old_count)
728 return 1;
729
730 /* check if we have got newer or different file firmware */
731 if (adapter->fw) {
732
733 const struct firmware *fw = adapter->fw;
734
735 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
736 version = NETXEN_DECODE_VERSION(val);
737
738 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
739 minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
740 build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
741
742 if (version > NETXEN_VERSION_CODE(major, minor, build))
743 return 1;
744
745 if (version == NETXEN_VERSION_CODE(major, minor, build)) {
746
747 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
748 fw_type = (val & 0x4) ?
749 NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
750
751 if (adapter->fw_type != fw_type)
752 return 1;
753 }
754 }
755
756 return 0;
757}
758
759static char *fw_name[] = {
760 "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash",
761};
762
763int
687netxen_load_firmware(struct netxen_adapter *adapter) 764netxen_load_firmware(struct netxen_adapter *adapter)
688{ 765{
689 u64 *ptr64; 766 u64 *ptr64;
690 u32 i, flashaddr, size; 767 u32 i, flashaddr, size;
691 const struct firmware *fw = adapter->fw; 768 const struct firmware *fw = adapter->fw;
769 struct pci_dev *pdev = adapter->pdev;
770
771 dev_info(&pdev->dev, "loading firmware from %s\n",
772 fw_name[adapter->fw_type]);
692 773
693 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 774 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
694 NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); 775 NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
@@ -756,7 +837,7 @@ static int
756netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname) 837netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
757{ 838{
758 __le32 val; 839 __le32 val;
759 u32 major, minor, build, ver, min_ver, bios; 840 u32 ver, min_ver, bios;
760 struct pci_dev *pdev = adapter->pdev; 841 struct pci_dev *pdev = adapter->pdev;
761 const struct firmware *fw = adapter->fw; 842 const struct firmware *fw = adapter->fw;
762 843
@@ -768,21 +849,18 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
768 return -EINVAL; 849 return -EINVAL;
769 850
770 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); 851 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
771 major = (__force u32)val & 0xff;
772 minor = ((__force u32)val >> 8) & 0xff;
773 build = (__force u32)val >> 16;
774 852
775 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 853 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
776 min_ver = NETXEN_VERSION_CODE(4, 0, 216); 854 min_ver = NETXEN_VERSION_CODE(4, 0, 216);
777 else 855 else
778 min_ver = NETXEN_VERSION_CODE(3, 4, 216); 856 min_ver = NETXEN_VERSION_CODE(3, 4, 216);
779 857
780 ver = NETXEN_VERSION_CODE(major, minor, build); 858 ver = NETXEN_DECODE_VERSION(val);
781 859
782 if ((major > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) { 860 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
783 dev_err(&pdev->dev, 861 dev_err(&pdev->dev,
784 "%s: firmware version %d.%d.%d unsupported\n", 862 "%s: firmware version %d.%d.%d unsupported\n",
785 fwname, major, minor, build); 863 fwname, _major(ver), _minor(ver), _build(ver));
786 return -EINVAL; 864 return -EINVAL;
787 } 865 }
788 866
@@ -798,22 +876,21 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
798 if (netxen_rom_fast_read(adapter, 876 if (netxen_rom_fast_read(adapter,
799 NX_FW_VERSION_OFFSET, (int *)&val)) 877 NX_FW_VERSION_OFFSET, (int *)&val))
800 return -EIO; 878 return -EIO;
801 major = (__force u32)val & 0xff; 879 val = NETXEN_DECODE_VERSION(val);
802 minor = ((__force u32)val >> 8) & 0xff; 880 if (val > ver) {
803 build = (__force u32)val >> 16; 881 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
804 if (NETXEN_VERSION_CODE(major, minor, build) > ver) 882 fwname);
805 return -EINVAL; 883 return -EINVAL;
884 }
806 885
807 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); 886 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
808 return 0; 887 return 0;
809} 888}
810 889
811static char *fw_name[] = { "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin" };
812
813void netxen_request_firmware(struct netxen_adapter *adapter) 890void netxen_request_firmware(struct netxen_adapter *adapter)
814{ 891{
815 u32 capability, flashed_ver; 892 u32 capability, flashed_ver;
816 int fw_type; 893 u8 fw_type;
817 struct pci_dev *pdev = adapter->pdev; 894 struct pci_dev *pdev = adapter->pdev;
818 int rc = 0; 895 int rc = 0;
819 896
@@ -830,6 +907,8 @@ request_mn:
830 907
831 netxen_rom_fast_read(adapter, 908 netxen_rom_fast_read(adapter,
832 NX_FW_VERSION_OFFSET, (int *)&flashed_ver); 909 NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
910 flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
911
833 if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { 912 if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
834 capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); 913 capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
835 if (capability & NX_PEG_TUNE_MN_PRESENT) { 914 if (capability & NX_PEG_TUNE_MN_PRESENT) {
@@ -838,6 +917,10 @@ request_mn:
838 } 917 }
839 } 918 }
840 919
920 fw_type = NX_FLASH_ROMIMAGE;
921 adapter->fw = NULL;
922 goto done;
923
841request_fw: 924request_fw:
842 rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev); 925 rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
843 if (rc != 0) { 926 if (rc != 0) {
@@ -846,6 +929,7 @@ request_fw:
846 goto request_mn; 929 goto request_mn;
847 } 930 }
848 931
932 fw_type = NX_FLASH_ROMIMAGE;
849 adapter->fw = NULL; 933 adapter->fw = NULL;
850 goto done; 934 goto done;
851 } 935 }
@@ -859,16 +943,13 @@ request_fw:
859 goto request_mn; 943 goto request_mn;
860 } 944 }
861 945
946 fw_type = NX_FLASH_ROMIMAGE;
862 adapter->fw = NULL; 947 adapter->fw = NULL;
863 goto done; 948 goto done;
864 } 949 }
865 950
866done: 951done:
867 if (adapter->fw) 952 adapter->fw_type = fw_type;
868 dev_info(&pdev->dev, "loading firmware from file %s\n",
869 fw_name[fw_type]);
870 else
871 dev_info(&pdev->dev, "loading firmware from flash\n");
872} 953}
873 954
874 955
@@ -1327,10 +1408,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1327 smp_mb(); 1408 smp_mb();
1328 1409
1329 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { 1410 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1330 netif_tx_lock(netdev); 1411 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1331 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) 1412 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
1332 netif_wake_queue(netdev); 1413 netif_wake_queue(netdev);
1333 netif_tx_unlock(netdev); 1414 __netif_tx_unlock(tx_ring->txq);
1334 } 1415 }
1335 } 1416 }
1336 /* 1417 /*
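
The netxen_need_fw_reset() routine added in this file decides whether firmware must be reloaded by sampling NETXEN_PEG_ALIVE_COUNTER roughly every 200 ms (via msleep_interruptible); if the counter never advances the firmware is treated as dead, otherwise the running version is compared against any requested firmware file. A toy user-space model of just the heartbeat part; read_alive_counter() is a stand-in for the register read:

    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for reading NETXEN_PEG_ALIVE_COUNTER; a live device keeps incrementing it. */
    static unsigned int read_alive_counter(void)
    {
            static unsigned int counter;
            return ++counter;
    }

    static int firmware_needs_reset(void)
    {
            unsigned int old = read_alive_counter();
            int i;

            for (i = 0; i < 10; i++) {
                    usleep(200 * 1000);             /* ~200 ms between samples */
                    if (read_alive_counter() != old)
                            return 0;               /* heartbeat seen: firmware alive */
            }
            return 1;                               /* counter stuck: reload firmware */
    }

    int main(void)
    {
            printf("needs reset: %d\n", firmware_needs_reset());
            return 0;
    }
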
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 2919a2d12bf4..3cd8cfcf627b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -215,13 +215,13 @@ netxen_napi_disable(struct netxen_adapter *adapter)
215 215
216 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 216 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
217 sds_ring = &recv_ctx->sds_rings[ring]; 217 sds_ring = &recv_ctx->sds_rings[ring];
218 napi_disable(&sds_ring->napi);
219 netxen_nic_disable_int(sds_ring); 218 netxen_nic_disable_int(sds_ring);
220 synchronize_irq(sds_ring->irq); 219 napi_synchronize(&sds_ring->napi);
220 napi_disable(&sds_ring->napi);
221 } 221 }
222} 222}
223 223
224static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) 224static int nx_set_dma_mask(struct netxen_adapter *adapter)
225{ 225{
226 struct pci_dev *pdev = adapter->pdev; 226 struct pci_dev *pdev = adapter->pdev;
227 uint64_t mask, cmask; 227 uint64_t mask, cmask;
@@ -229,19 +229,17 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
229 adapter->pci_using_dac = 0; 229 adapter->pci_using_dac = 0;
230 230
231 mask = DMA_BIT_MASK(32); 231 mask = DMA_BIT_MASK(32);
232 /*
233 * Consistent DMA mask is set to 32 bit because it cannot be set to
234 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
235 * come off this pool.
236 */
237 cmask = DMA_BIT_MASK(32); 232 cmask = DMA_BIT_MASK(32);
238 233
234 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
239#ifndef CONFIG_IA64 235#ifndef CONFIG_IA64
240 if (revision_id >= NX_P3_B0)
241 mask = DMA_BIT_MASK(39);
242 else if (revision_id == NX_P2_C1)
243 mask = DMA_BIT_MASK(35); 236 mask = DMA_BIT_MASK(35);
244#endif 237#endif
238 } else {
239 mask = DMA_BIT_MASK(39);
240 cmask = mask;
241 }
242
245 if (pci_set_dma_mask(pdev, mask) == 0 && 243 if (pci_set_dma_mask(pdev, mask) == 0 &&
246 pci_set_consistent_dma_mask(pdev, cmask) == 0) { 244 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
247 adapter->pci_using_dac = 1; 245 adapter->pci_using_dac = 1;
@@ -256,7 +254,7 @@ static int
256nx_update_dma_mask(struct netxen_adapter *adapter) 254nx_update_dma_mask(struct netxen_adapter *adapter)
257{ 255{
258 int change, shift, err; 256 int change, shift, err;
259 uint64_t mask, old_mask; 257 uint64_t mask, old_mask, old_cmask;
260 struct pci_dev *pdev = adapter->pdev; 258 struct pci_dev *pdev = adapter->pdev;
261 259
262 change = 0; 260 change = 0;
@@ -272,14 +270,29 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
272 270
273 if (change) { 271 if (change) {
274 old_mask = pdev->dma_mask; 272 old_mask = pdev->dma_mask;
273 old_cmask = pdev->dev.coherent_dma_mask;
274
275 mask = (1ULL<<(32+shift)) - 1; 275 mask = (1ULL<<(32+shift)) - 1;
276 276
277 err = pci_set_dma_mask(pdev, mask); 277 err = pci_set_dma_mask(pdev, mask);
278 if (err) 278 if (err)
279 return pci_set_dma_mask(pdev, old_mask); 279 goto err_out;
280
281 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
282
283 err = pci_set_consistent_dma_mask(pdev, mask);
284 if (err)
285 goto err_out;
286 }
287 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
280 } 288 }
281 289
282 return 0; 290 return 0;
291
292err_out:
293 pci_set_dma_mask(pdev, old_mask);
294 pci_set_consistent_dma_mask(pdev, old_cmask);
295 return err;
283} 296}
284 297
285static void netxen_check_options(struct netxen_adapter *adapter) 298static void netxen_check_options(struct netxen_adapter *adapter)
@@ -718,6 +731,10 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
718 if (request_fw) 731 if (request_fw)
719 netxen_request_firmware(adapter); 732 netxen_request_firmware(adapter);
720 733
734 err = netxen_need_fw_reset(adapter);
735 if (err <= 0)
736 return err;
737
721 if (first_boot != 0x55555555) { 738 if (first_boot != 0x55555555) {
722 NXWR32(adapter, CRB_CMDPEG_STATE, 0); 739 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
723 netxen_pinit_from_rom(adapter, 0); 740 netxen_pinit_from_rom(adapter, 0);
@@ -829,11 +846,11 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
829 846
830 adapter->ahw.linkup = 0; 847 adapter->ahw.linkup = 0;
831 848
832 netxen_napi_enable(adapter);
833
834 if (adapter->max_sds_rings > 1) 849 if (adapter->max_sds_rings > 1)
835 netxen_config_rss(adapter, 1); 850 netxen_config_rss(adapter, 1);
836 851
852 netxen_napi_enable(adapter);
853
837 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) 854 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
838 netxen_linkevent_request(adapter, 1); 855 netxen_linkevent_request(adapter, 1);
839 else 856 else
@@ -847,8 +864,9 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
847static void 864static void
848netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) 865netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
849{ 866{
867 spin_lock(&adapter->tx_clean_lock);
850 netif_carrier_off(netdev); 868 netif_carrier_off(netdev);
851 netif_stop_queue(netdev); 869 netif_tx_disable(netdev);
852 870
853 if (adapter->stop_port) 871 if (adapter->stop_port)
854 adapter->stop_port(adapter); 872 adapter->stop_port(adapter);
@@ -859,9 +877,10 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
859 netxen_napi_disable(adapter); 877 netxen_napi_disable(adapter);
860 878
861 netxen_release_tx_buffers(adapter); 879 netxen_release_tx_buffers(adapter);
880 spin_unlock(&adapter->tx_clean_lock);
862 881
863 FLUSH_SCHEDULED_WORK();
864 del_timer_sync(&adapter->watchdog_timer); 882 del_timer_sync(&adapter->watchdog_timer);
883 FLUSH_SCHEDULED_WORK();
865} 884}
866 885
867 886
@@ -939,8 +958,8 @@ err_out_free_sw:
939static void 958static void
940netxen_nic_detach(struct netxen_adapter *adapter) 959netxen_nic_detach(struct netxen_adapter *adapter)
941{ 960{
942 netxen_release_rx_buffers(adapter);
943 netxen_free_hw_resources(adapter); 961 netxen_free_hw_resources(adapter);
962 netxen_release_rx_buffers(adapter);
944 netxen_nic_free_irq(adapter); 963 netxen_nic_free_irq(adapter);
945 netxen_free_sw_resources(adapter); 964 netxen_free_sw_resources(adapter);
946 965
@@ -1000,7 +1019,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1000 revision_id = pdev->revision; 1019 revision_id = pdev->revision;
1001 adapter->ahw.revision_id = revision_id; 1020 adapter->ahw.revision_id = revision_id;
1002 1021
1003 err = nx_set_dma_mask(adapter, revision_id); 1022 err = nx_set_dma_mask(adapter);
1004 if (err) 1023 if (err)
1005 goto err_out_free_netdev; 1024 goto err_out_free_netdev;
1006 1025
@@ -1529,10 +1548,12 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1529 printk(KERN_ALERT 1548 printk(KERN_ALERT
1530 "%s: Device temperature %d degrees C exceeds" 1549 "%s: Device temperature %d degrees C exceeds"
1531 " maximum allowed. Hardware has been shut down.\n", 1550 " maximum allowed. Hardware has been shut down.\n",
1532 netxen_nic_driver_name, temp_val); 1551 netdev->name, temp_val);
1552
1553 netif_device_detach(netdev);
1554 netxen_nic_down(adapter, netdev);
1555 netxen_nic_detach(adapter);
1533 1556
1534 netif_carrier_off(netdev);
1535 netif_stop_queue(netdev);
1536 rv = 1; 1557 rv = 1;
1537 } else if (temp_state == NX_TEMP_WARN) { 1558 } else if (temp_state == NX_TEMP_WARN) {
1538 if (adapter->temp == NX_TEMP_NORMAL) { 1559 if (adapter->temp == NX_TEMP_NORMAL) {
@@ -1540,13 +1561,13 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1540 "%s: Device temperature %d degrees C " 1561 "%s: Device temperature %d degrees C "
1541 "exceeds operating range." 1562 "exceeds operating range."
1542 " Immediate action needed.\n", 1563 " Immediate action needed.\n",
1543 netxen_nic_driver_name, temp_val); 1564 netdev->name, temp_val);
1544 } 1565 }
1545 } else { 1566 } else {
1546 if (adapter->temp == NX_TEMP_WARN) { 1567 if (adapter->temp == NX_TEMP_WARN) {
1547 printk(KERN_INFO 1568 printk(KERN_INFO
1548 "%s: Device temperature is now %d degrees C" 1569 "%s: Device temperature is now %d degrees C"
1549 " in normal range.\n", netxen_nic_driver_name, 1570 " in normal range.\n", netdev->name,
1550 temp_val); 1571 temp_val);
1551 } 1572 }
1552 } 1573 }
@@ -1619,7 +1640,7 @@ void netxen_watchdog_task(struct work_struct *work)
1619 struct netxen_adapter *adapter = 1640 struct netxen_adapter *adapter =
1620 container_of(work, struct netxen_adapter, watchdog_task); 1641 container_of(work, struct netxen_adapter, watchdog_task);
1621 1642
1622 if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter)) 1643 if (netxen_nic_check_temp(adapter))
1623 return; 1644 return;
1624 1645
1625 if (!adapter->has_link_events) 1646 if (!adapter->has_link_events)
@@ -1641,6 +1662,9 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1641 struct netxen_adapter *adapter = 1662 struct netxen_adapter *adapter =
1642 container_of(work, struct netxen_adapter, tx_timeout_task); 1663 container_of(work, struct netxen_adapter, tx_timeout_task);
1643 1664
1665 if (!netif_running(adapter->netdev))
1666 return;
1667
1644 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", 1668 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1645 netxen_nic_driver_name, adapter->netdev->name); 1669 netxen_nic_driver_name, adapter->netdev->name);
1646 1670
@@ -1753,7 +1777,8 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
1753 1777
1754 if ((work_done < budget) && tx_complete) { 1778 if ((work_done < budget) && tx_complete) {
1755 napi_complete(&sds_ring->napi); 1779 napi_complete(&sds_ring->napi);
1756 netxen_nic_enable_int(sds_ring); 1780 if (netif_running(adapter->netdev))
1781 netxen_nic_enable_int(sds_ring);
1757 } 1782 }
1758 1783
1759 return work_done; 1784 return work_done;
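
The DMA-mask rework above settles on DMA_BIT_MASK(35) for NX2031 (P2) parts and DMA_BIT_MASK(39) for later silicon, and rolls both the streaming and coherent masks back if either update fails. As a quick reminder of what those masks expand to, a small stand-alone check (the macro mirrors the kernel definition):

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            printf("32-bit mask: %#llx\n", (unsigned long long)DMA_BIT_MASK(32));
            printf("35-bit mask: %#llx\n", (unsigned long long)DMA_BIT_MASK(35));
            printf("39-bit mask: %#llx\n", (unsigned long long)DMA_BIT_MASK(39));
            return 0;
    }
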
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 8c1f6988f398..89f7b2ad5231 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -105,7 +105,7 @@ IVc. Errata
105 105
106static char version[] __devinitdata = 106static char version[] __devinitdata =
107KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n" 107KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
108KERN_INFO " Support available from http://foo.com/bar/baz.html\n"; 108" Support available from http://foo.com/bar/baz.html\n";
109 109
110/* define to 1 to enable PIO instead of MMIO */ 110/* define to 1 to enable PIO instead of MMIO */
111#undef USE_IO_OPS 111#undef USE_IO_OPS
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index ec7cf5ac4f05..690b9c76d34e 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -156,6 +156,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev);
156static int el3_rx(struct net_device *dev); 156static int el3_rx(struct net_device *dev);
157static int el3_close(struct net_device *dev); 157static int el3_close(struct net_device *dev);
158static void el3_tx_timeout(struct net_device *dev); 158static void el3_tx_timeout(struct net_device *dev);
159static void set_rx_mode(struct net_device *dev);
159static void set_multicast_list(struct net_device *dev); 160static void set_multicast_list(struct net_device *dev);
160static const struct ethtool_ops netdev_ethtool_ops; 161static const struct ethtool_ops netdev_ethtool_ops;
161 162
@@ -488,8 +489,7 @@ static void tc589_reset(struct net_device *dev)
488 /* Switch to register set 1 for normal use. */ 489 /* Switch to register set 1 for normal use. */
489 EL3WINDOW(1); 490 EL3WINDOW(1);
490 491
491 /* Accept b-cast and phys addr only. */ 492 set_rx_mode(dev);
492 outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
493 outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ 493 outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
494 outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ 494 outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
495 outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ 495 outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
@@ -700,7 +700,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
700 if (fifo_diag & 0x2000) { 700 if (fifo_diag & 0x2000) {
701 /* Rx underrun */ 701 /* Rx underrun */
702 tc589_wait_for_completion(dev, RxReset); 702 tc589_wait_for_completion(dev, RxReset);
703 set_multicast_list(dev); 703 set_rx_mode(dev);
704 outw(RxEnable, ioaddr + EL3_CMD); 704 outw(RxEnable, ioaddr + EL3_CMD);
705 } 705 }
706 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); 706 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
@@ -905,14 +905,11 @@ static int el3_rx(struct net_device *dev)
905 return 0; 905 return 0;
906} 906}
907 907
908static void set_multicast_list(struct net_device *dev) 908static void set_rx_mode(struct net_device *dev)
909{ 909{
910 struct el3_private *lp = netdev_priv(dev);
911 struct pcmcia_device *link = lp->p_dev;
912 unsigned int ioaddr = dev->base_addr; 910 unsigned int ioaddr = dev->base_addr;
913 u16 opts = SetRxFilter | RxStation | RxBroadcast; 911 u16 opts = SetRxFilter | RxStation | RxBroadcast;
914 912
915 if (!pcmcia_dev_present(link)) return;
916 if (dev->flags & IFF_PROMISC) 913 if (dev->flags & IFF_PROMISC)
917 opts |= RxMulticast | RxProm; 914 opts |= RxMulticast | RxProm;
918 else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) 915 else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
@@ -920,6 +917,16 @@ static void set_multicast_list(struct net_device *dev)
920 outw(opts, ioaddr + EL3_CMD); 917 outw(opts, ioaddr + EL3_CMD);
921} 918}
922 919
920static void set_multicast_list(struct net_device *dev)
921{
922 struct el3_private *priv = netdev_priv(dev);
923 unsigned long flags;
924
925 spin_lock_irqsave(&priv->lock, flags);
926 set_rx_mode(dev);
927 spin_unlock_irqrestore(&priv->lock, flags);
928}
929
923static int el3_close(struct net_device *dev) 930static int el3_close(struct net_device *dev)
924{ 931{
925 struct el3_private *lp = netdev_priv(dev); 932 struct el3_private *lp = netdev_priv(dev);
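
The 3c589_cs change above splits the RX-filter programming into a lock-free set_rx_mode() helper, which the interrupt handler's Rx-underrun recovery can call directly, and a set_multicast_list() wrapper that takes the driver spinlock when invoked from process context. A stripped-down sketch of that split, with the register writes elided:

    static void set_rx_mode(struct net_device *dev)
    {
            /* program the RX filter registers; the caller handles any locking */
    }

    static void set_multicast_list(struct net_device *dev)
    {
            struct el3_private *priv = netdev_priv(dev);
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);
            set_rx_mode(dev);
            spin_unlock_irqrestore(&priv->lock, flags);
    }
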
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index f51944b28cfa..06618af1a468 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -298,14 +298,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
298 298
299 strcpy(info->node.dev_name, dev->name); 299 strcpy(info->node.dev_name, dev->name);
300 300
301 printk(KERN_INFO "%s: port %#3lx, irq %d,", 301 printk(KERN_INFO
302 dev->name, dev->base_addr, dev->irq); 302 "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
303 printk (" mmio %#5lx,", (u_long)ti->mmio); 303 dev->name, dev->base_addr, dev->irq,
304 printk (" sram %#5lx,", (u_long)ti->sram_base << 12); 304 (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
305 printk ("\n" KERN_INFO " hwaddr="); 305 dev->dev_addr);
306 for (i = 0; i < TR_ALEN; i++)
307 printk("%02X", dev->dev_addr[i]);
308 printk("\n");
309 return 0; 306 return 0;
310 307
311cs_failed: 308cs_failed:
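
The ibmtr_cs cleanup above collapses five printk calls into one and uses the %pM extension, which formats a 6-byte MAC address with colon separators. For illustration, the hardware-address part alone would be:

    printk(KERN_INFO "%s: hwaddr=%pM\n", dev->name, dev->dev_addr);
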
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 02ef63ed1f99..36de91baf238 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1425,15 +1425,12 @@ static void BuildLAF(int *ladrf, int *adr)
1425 ladrf[byte] |= (1 << (hashcode & 7)); 1425 ladrf[byte] |= (1 << (hashcode & 7));
1426 1426
1427#ifdef PCMCIA_DEBUG 1427#ifdef PCMCIA_DEBUG
1428 if (pc_debug > 2) { 1428 if (pc_debug > 2)
1429 printk(KERN_DEBUG " adr ="); 1429 printk(KERN_DEBUG " adr =%pM\n", adr);
1430 for (i = 0; i < 6; i++) 1430 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
1431 printk(" %02X", adr[i]); 1431 for (i = 0; i < 8; i++)
1432 printk("\n" KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63]" 1432 printk(KERN_CONT " %02X", ladrf[i]);
1433 " =", hashcode); 1433 printk(KERN_CONT "\n");
1434 for (i = 0; i < 8; i++)
1435 printk(" %02X", ladrf[i]);
1436 printk("\n");
1437 } 1434 }
1438#endif 1435#endif
1439} /* BuildLAF */ 1436} /* BuildLAF */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 652a36888361..9ef1c1bfa83d 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1727 PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50), 1727 PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50),
1728 PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110), 1728 PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110),
1729 PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941), 1729 PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941),
1730 PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826),
1730 PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df), 1731 PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df),
1731 PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0), 1732 PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0),
1732 PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd), 1733 PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd),
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 1c35e1d637a0..a646a445fda9 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -485,7 +485,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
485 &new_ring_dma_addr); 485 &new_ring_dma_addr);
486 if (new_tx_ring == NULL) { 486 if (new_tx_ring == NULL) {
487 if (netif_msg_drv(lp)) 487 if (netif_msg_drv(lp))
488 printk("\n" KERN_ERR 488 printk(KERN_ERR
489 "%s: Consistent memory allocation failed.\n", 489 "%s: Consistent memory allocation failed.\n",
490 dev->name); 490 dev->name);
491 return; 491 return;
@@ -496,7 +496,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
496 GFP_ATOMIC); 496 GFP_ATOMIC);
497 if (!new_dma_addr_list) { 497 if (!new_dma_addr_list) {
498 if (netif_msg_drv(lp)) 498 if (netif_msg_drv(lp))
499 printk("\n" KERN_ERR 499 printk(KERN_ERR
500 "%s: Memory allocation failed.\n", dev->name); 500 "%s: Memory allocation failed.\n", dev->name);
501 goto free_new_tx_ring; 501 goto free_new_tx_ring;
502 } 502 }
@@ -505,7 +505,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
505 GFP_ATOMIC); 505 GFP_ATOMIC);
506 if (!new_skb_list) { 506 if (!new_skb_list) {
507 if (netif_msg_drv(lp)) 507 if (netif_msg_drv(lp))
508 printk("\n" KERN_ERR 508 printk(KERN_ERR
509 "%s: Memory allocation failed.\n", dev->name); 509 "%s: Memory allocation failed.\n", dev->name);
510 goto free_new_lists; 510 goto free_new_lists;
511 } 511 }
@@ -563,7 +563,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
563 &new_ring_dma_addr); 563 &new_ring_dma_addr);
564 if (new_rx_ring == NULL) { 564 if (new_rx_ring == NULL) {
565 if (netif_msg_drv(lp)) 565 if (netif_msg_drv(lp))
566 printk("\n" KERN_ERR 566 printk(KERN_ERR
567 "%s: Consistent memory allocation failed.\n", 567 "%s: Consistent memory allocation failed.\n",
568 dev->name); 568 dev->name);
569 return; 569 return;
@@ -574,7 +574,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
574 GFP_ATOMIC); 574 GFP_ATOMIC);
575 if (!new_dma_addr_list) { 575 if (!new_dma_addr_list) {
576 if (netif_msg_drv(lp)) 576 if (netif_msg_drv(lp))
577 printk("\n" KERN_ERR 577 printk(KERN_ERR
578 "%s: Memory allocation failed.\n", dev->name); 578 "%s: Memory allocation failed.\n", dev->name);
579 goto free_new_rx_ring; 579 goto free_new_rx_ring;
580 } 580 }
@@ -583,7 +583,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
583 GFP_ATOMIC); 583 GFP_ATOMIC);
584 if (!new_skb_list) { 584 if (!new_skb_list) {
585 if (netif_msg_drv(lp)) 585 if (netif_msg_drv(lp))
586 printk("\n" KERN_ERR 586 printk(KERN_ERR
587 "%s: Memory allocation failed.\n", dev->name); 587 "%s: Memory allocation failed.\n", dev->name);
588 goto free_new_lists; 588 goto free_new_lists;
589 } 589 }
@@ -1611,8 +1611,11 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1611 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 1611 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
1612 && pcnet32_dwio_check(ioaddr)) { 1612 && pcnet32_dwio_check(ioaddr)) {
1613 a = &pcnet32_dwio; 1613 a = &pcnet32_dwio;
1614 } else 1614 } else {
1615 if (pcnet32_debug & NETIF_MSG_PROBE)
1616 printk(KERN_ERR PFX "No access methods\n");
1615 goto err_release_region; 1617 goto err_release_region;
1618 }
1616 } 1619 }
1617 1620
1618 chip_version = 1621 chip_version =
@@ -1719,7 +1722,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1719 ret = -ENOMEM; 1722 ret = -ENOMEM;
1720 goto err_release_region; 1723 goto err_release_region;
1721 } 1724 }
1722 SET_NETDEV_DEV(dev, &pdev->dev); 1725
1726 if (pdev)
1727 SET_NETDEV_DEV(dev, &pdev->dev);
1723 1728
1724 if (pcnet32_debug & NETIF_MSG_PROBE) 1729 if (pcnet32_debug & NETIF_MSG_PROBE)
1725 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); 1730 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
@@ -1766,38 +1771,38 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1766 /* Version 0x2623 and 0x2624 */ 1771 /* Version 0x2623 and 0x2624 */
1767 if (((chip_version + 1) & 0xfffe) == 0x2624) { 1772 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1768 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ 1773 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
1769 printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i); 1774 printk(KERN_INFO " tx_start_pt(0x%04x):", i);
1770 switch (i >> 10) { 1775 switch (i >> 10) {
1771 case 0: 1776 case 0:
1772 printk(" 20 bytes,"); 1777 printk(KERN_CONT " 20 bytes,");
1773 break; 1778 break;
1774 case 1: 1779 case 1:
1775 printk(" 64 bytes,"); 1780 printk(KERN_CONT " 64 bytes,");
1776 break; 1781 break;
1777 case 2: 1782 case 2:
1778 printk(" 128 bytes,"); 1783 printk(KERN_CONT " 128 bytes,");
1779 break; 1784 break;
1780 case 3: 1785 case 3:
1781 printk("~220 bytes,"); 1786 printk(KERN_CONT "~220 bytes,");
1782 break; 1787 break;
1783 } 1788 }
1784 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ 1789 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
1785 printk(" BCR18(%x):", i & 0xffff); 1790 printk(KERN_CONT " BCR18(%x):", i & 0xffff);
1786 if (i & (1 << 5)) 1791 if (i & (1 << 5))
1787 printk("BurstWrEn "); 1792 printk(KERN_CONT "BurstWrEn ");
1788 if (i & (1 << 6)) 1793 if (i & (1 << 6))
1789 printk("BurstRdEn "); 1794 printk(KERN_CONT "BurstRdEn ");
1790 if (i & (1 << 7)) 1795 if (i & (1 << 7))
1791 printk("DWordIO "); 1796 printk(KERN_CONT "DWordIO ");
1792 if (i & (1 << 11)) 1797 if (i & (1 << 11))
1793 printk("NoUFlow "); 1798 printk(KERN_CONT "NoUFlow ");
1794 i = a->read_bcr(ioaddr, 25); 1799 i = a->read_bcr(ioaddr, 25);
1795 printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8); 1800 printk(KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
1796 i = a->read_bcr(ioaddr, 26); 1801 i = a->read_bcr(ioaddr, 26);
1797 printk(" SRAM_BND=0x%04x,", i << 8); 1802 printk(KERN_CONT " SRAM_BND=0x%04x,", i << 8);
1798 i = a->read_bcr(ioaddr, 27); 1803 i = a->read_bcr(ioaddr, 27);
1799 if (i & (1 << 14)) 1804 if (i & (1 << 14))
1800 printk("LowLatRx"); 1805 printk(KERN_CONT "LowLatRx");
1801 } 1806 }
1802 } 1807 }
1803 1808
@@ -1818,7 +1823,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1818 1823
1819 spin_lock_init(&lp->lock); 1824 spin_lock_init(&lp->lock);
1820 1825
1821 SET_NETDEV_DEV(dev, &pdev->dev);
1822 lp->name = chipname; 1826 lp->name = chipname;
1823 lp->shared_irq = shared; 1827 lp->shared_irq = shared;
1824 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ 1828 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
@@ -1852,12 +1856,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1852 ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) 1856 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1853 lp->options |= PCNET32_PORT_FD; 1857 lp->options |= PCNET32_PORT_FD;
1854 1858
1855 if (!a) {
1856 if (pcnet32_debug & NETIF_MSG_PROBE)
1857 printk(KERN_ERR PFX "No access methods\n");
1858 ret = -ENODEV;
1859 goto err_free_consistent;
1860 }
1861 lp->a = *a; 1859 lp->a = *a;
1862 1860
1863 /* prior to register_netdev, dev->name is not yet correct */ 1861 /* prior to register_netdev, dev->name is not yet correct */
@@ -1973,14 +1971,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1973 1971
1974 return 0; 1972 return 0;
1975 1973
1976 err_free_ring: 1974err_free_ring:
1977 pcnet32_free_ring(dev); 1975 pcnet32_free_ring(dev);
1978 err_free_consistent:
1979 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), 1976 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
1980 lp->init_block, lp->init_dma_addr); 1977 lp->init_block, lp->init_dma_addr);
1981 err_free_netdev: 1978err_free_netdev:
1982 free_netdev(dev); 1979 free_netdev(dev);
1983 err_release_region: 1980err_release_region:
1984 release_region(ioaddr, PCNET32_TOTAL_SIZE); 1981 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1985 return ret; 1982 return ret;
1986} 1983}
@@ -1996,7 +1993,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
1996 &lp->tx_ring_dma_addr); 1993 &lp->tx_ring_dma_addr);
1997 if (lp->tx_ring == NULL) { 1994 if (lp->tx_ring == NULL) {
1998 if (netif_msg_drv(lp)) 1995 if (netif_msg_drv(lp))
1999 printk("\n" KERN_ERR PFX 1996 printk(KERN_ERR PFX
2000 "%s: Consistent memory allocation failed.\n", 1997 "%s: Consistent memory allocation failed.\n",
2001 name); 1998 name);
2002 return -ENOMEM; 1999 return -ENOMEM;
@@ -2008,7 +2005,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
2008 &lp->rx_ring_dma_addr); 2005 &lp->rx_ring_dma_addr);
2009 if (lp->rx_ring == NULL) { 2006 if (lp->rx_ring == NULL) {
2010 if (netif_msg_drv(lp)) 2007 if (netif_msg_drv(lp))
2011 printk("\n" KERN_ERR PFX 2008 printk(KERN_ERR PFX
2012 "%s: Consistent memory allocation failed.\n", 2009 "%s: Consistent memory allocation failed.\n",
2013 name); 2010 name);
2014 return -ENOMEM; 2011 return -ENOMEM;
@@ -2018,7 +2015,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
2018 GFP_ATOMIC); 2015 GFP_ATOMIC);
2019 if (!lp->tx_dma_addr) { 2016 if (!lp->tx_dma_addr) {
2020 if (netif_msg_drv(lp)) 2017 if (netif_msg_drv(lp))
2021 printk("\n" KERN_ERR PFX 2018 printk(KERN_ERR PFX
2022 "%s: Memory allocation failed.\n", name); 2019 "%s: Memory allocation failed.\n", name);
2023 return -ENOMEM; 2020 return -ENOMEM;
2024 } 2021 }
@@ -2027,7 +2024,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
2027 GFP_ATOMIC); 2024 GFP_ATOMIC);
2028 if (!lp->rx_dma_addr) { 2025 if (!lp->rx_dma_addr) {
2029 if (netif_msg_drv(lp)) 2026 if (netif_msg_drv(lp))
2030 printk("\n" KERN_ERR PFX 2027 printk(KERN_ERR PFX
2031 "%s: Memory allocation failed.\n", name); 2028 "%s: Memory allocation failed.\n", name);
2032 return -ENOMEM; 2029 return -ENOMEM;
2033 } 2030 }
@@ -2036,7 +2033,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
2036 GFP_ATOMIC); 2033 GFP_ATOMIC);
2037 if (!lp->tx_skbuff) { 2034 if (!lp->tx_skbuff) {
2038 if (netif_msg_drv(lp)) 2035 if (netif_msg_drv(lp))
2039 printk("\n" KERN_ERR PFX 2036 printk(KERN_ERR PFX
2040 "%s: Memory allocation failed.\n", name); 2037 "%s: Memory allocation failed.\n", name);
2041 return -ENOMEM; 2038 return -ENOMEM;
2042 } 2039 }
@@ -2045,7 +2042,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
2045 GFP_ATOMIC); 2042 GFP_ATOMIC);
2046 if (!lp->rx_skbuff) { 2043 if (!lp->rx_skbuff) {
2047 if (netif_msg_drv(lp)) 2044 if (netif_msg_drv(lp))
2048 printk("\n" KERN_ERR PFX 2045 printk(KERN_ERR PFX
2049 "%s: Memory allocation failed.\n", name); 2046 "%s: Memory allocation failed.\n", name);
2050 return -ENOMEM; 2047 return -ENOMEM;
2051 } 2048 }
@@ -2089,6 +2086,7 @@ static void pcnet32_free_ring(struct net_device *dev)
2089static int pcnet32_open(struct net_device *dev) 2086static int pcnet32_open(struct net_device *dev)
2090{ 2087{
2091 struct pcnet32_private *lp = netdev_priv(dev); 2088 struct pcnet32_private *lp = netdev_priv(dev);
2089 struct pci_dev *pdev = lp->pci_dev;
2092 unsigned long ioaddr = dev->base_addr; 2090 unsigned long ioaddr = dev->base_addr;
2093 u16 val; 2091 u16 val;
2094 int i; 2092 int i;
@@ -2149,9 +2147,9 @@ static int pcnet32_open(struct net_device *dev)
2149 lp->a.write_csr(ioaddr, 124, val); 2147 lp->a.write_csr(ioaddr, 124, val);
2150 2148
2151 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ 2149 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
2152 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && 2150 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
2153 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || 2151 (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
2154 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 2152 pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
2155 if (lp->options & PCNET32_PORT_ASEL) { 2153 if (lp->options & PCNET32_PORT_ASEL) {
2156 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; 2154 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
2157 if (netif_msg_link(lp)) 2155 if (netif_msg_link(lp))
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 33984b737233..22cdd451fb82 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -30,6 +30,7 @@
30 30
31#ifdef CONFIG_OF_GPIO 31#ifdef CONFIG_OF_GPIO
32#include <linux/of_gpio.h> 32#include <linux/of_gpio.h>
33#include <linux/of_mdio.h>
33#include <linux/of_platform.h> 34#include <linux/of_platform.h>
34#endif 35#endif
35 36
@@ -81,13 +82,12 @@ static struct mdiobb_ops mdio_gpio_ops = {
81 .get_mdio_data = mdio_get, 82 .get_mdio_data = mdio_get,
82}; 83};
83 84
84static int __devinit mdio_gpio_bus_init(struct device *dev, 85static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
85 struct mdio_gpio_platform_data *pdata, 86 struct mdio_gpio_platform_data *pdata,
86 int bus_id) 87 int bus_id)
87{ 88{
88 struct mii_bus *new_bus; 89 struct mii_bus *new_bus;
89 struct mdio_gpio_info *bitbang; 90 struct mdio_gpio_info *bitbang;
90 int ret = -ENOMEM;
91 int i; 91 int i;
92 92
93 bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); 93 bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL);
@@ -104,8 +104,6 @@ static int __devinit mdio_gpio_bus_init(struct device *dev,
104 104
105 new_bus->name = "GPIO Bitbanged MDIO", 105 new_bus->name = "GPIO Bitbanged MDIO",
106 106
107 ret = -ENODEV;
108
109 new_bus->phy_mask = pdata->phy_mask; 107 new_bus->phy_mask = pdata->phy_mask;
110 new_bus->irq = pdata->irqs; 108 new_bus->irq = pdata->irqs;
111 new_bus->parent = dev; 109 new_bus->parent = dev;
@@ -129,15 +127,8 @@ static int __devinit mdio_gpio_bus_init(struct device *dev,
129 127
130 dev_set_drvdata(dev, new_bus); 128 dev_set_drvdata(dev, new_bus);
131 129
132 ret = mdiobus_register(new_bus); 130 return new_bus;
133 if (ret)
134 goto out_free_all;
135
136 return 0;
137 131
138out_free_all:
139 dev_set_drvdata(dev, NULL);
140 gpio_free(bitbang->mdio);
141out_free_mdc: 132out_free_mdc:
142 gpio_free(bitbang->mdc); 133 gpio_free(bitbang->mdc);
143out_free_bus: 134out_free_bus:
@@ -145,30 +136,47 @@ out_free_bus:
145out_free_bitbang: 136out_free_bitbang:
146 kfree(bitbang); 137 kfree(bitbang);
147out: 138out:
148 return ret; 139 return NULL;
149} 140}
150 141
151static void __devexit mdio_gpio_bus_destroy(struct device *dev) 142static void __devinit mdio_gpio_bus_deinit(struct device *dev)
152{ 143{
153 struct mii_bus *bus = dev_get_drvdata(dev); 144 struct mii_bus *bus = dev_get_drvdata(dev);
154 struct mdio_gpio_info *bitbang = bus->priv; 145 struct mdio_gpio_info *bitbang = bus->priv;
155 146
156 mdiobus_unregister(bus);
157 free_mdio_bitbang(bus);
158 dev_set_drvdata(dev, NULL); 147 dev_set_drvdata(dev, NULL);
159 gpio_free(bitbang->mdc);
160 gpio_free(bitbang->mdio); 148 gpio_free(bitbang->mdio);
149 gpio_free(bitbang->mdc);
150 free_mdio_bitbang(bus);
161 kfree(bitbang); 151 kfree(bitbang);
162} 152}
163 153
154static void __devexit mdio_gpio_bus_destroy(struct device *dev)
155{
156 struct mii_bus *bus = dev_get_drvdata(dev);
157
158 mdiobus_unregister(bus);
159 mdio_gpio_bus_deinit(dev);
160}
161
164static int __devinit mdio_gpio_probe(struct platform_device *pdev) 162static int __devinit mdio_gpio_probe(struct platform_device *pdev)
165{ 163{
166 struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; 164 struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data;
165 struct mii_bus *new_bus;
166 int ret;
167 167
168 if (!pdata) 168 if (!pdata)
169 return -ENODEV; 169 return -ENODEV;
170 170
171 return mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); 171 new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id);
172 if (!new_bus)
173 return -ENODEV;
174
175 ret = mdiobus_register(new_bus);
176 if (ret)
177 mdio_gpio_bus_deinit(&pdev->dev);
178
179 return ret;
172} 180}
173 181
174static int __devexit mdio_gpio_remove(struct platform_device *pdev) 182static int __devexit mdio_gpio_remove(struct platform_device *pdev)
@@ -179,29 +187,12 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
179} 187}
180 188
181#ifdef CONFIG_OF_GPIO 189#ifdef CONFIG_OF_GPIO
182static void __devinit add_phy(struct mdio_gpio_platform_data *pdata,
183 struct device_node *np)
184{
185 const u32 *data;
186 int len, id, irq;
187
188 data = of_get_property(np, "reg", &len);
189 if (!data || len != 4)
190 return;
191
192 id = *data;
193 pdata->phy_mask &= ~(1 << id);
194
195 irq = of_irq_to_resource(np, 0, NULL);
196 if (irq)
197 pdata->irqs[id] = irq;
198}
199 190
200static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, 191static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
201 const struct of_device_id *match) 192 const struct of_device_id *match)
202{ 193{
203 struct device_node *np = NULL;
204 struct mdio_gpio_platform_data *pdata; 194 struct mdio_gpio_platform_data *pdata;
195 struct mii_bus *new_bus;
205 int ret; 196 int ret;
206 197
207 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 198 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -215,14 +206,18 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
215 206
216 ret = of_get_gpio(ofdev->node, 1); 207 ret = of_get_gpio(ofdev->node, 1);
217 if (ret < 0) 208 if (ret < 0)
218 goto out_free; 209 goto out_free;
219 pdata->mdio = ret; 210 pdata->mdio = ret;
220 211
221 while ((np = of_get_next_child(ofdev->node, np))) 212 new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
222 if (!strcmp(np->type, "ethernet-phy")) 213 if (!new_bus)
223 add_phy(pdata, np); 214 return -ENODEV;
224 215
225 return mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); 216 ret = of_mdiobus_register(new_bus, ofdev->node);
217 if (ret)
218 mdio_gpio_bus_deinit(&ofdev->dev);
219
220 return ret;
226 221
227out_free: 222out_free:
228 kfree(pdata); 223 kfree(pdata);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 61755cbd978e..eda94fcd4065 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -928,13 +928,32 @@ static void phy_state_machine(struct work_struct *work)
928 * Otherwise, it's 0, and we're 928 * Otherwise, it's 0, and we're
929 * still waiting for AN */ 929 * still waiting for AN */
930 if (err > 0) { 930 if (err > 0) {
931 phydev->state = PHY_RUNNING; 931 err = phy_read_status(phydev);
932 if (err)
933 break;
934
935 if (phydev->link) {
936 phydev->state = PHY_RUNNING;
937 netif_carrier_on(phydev->attached_dev);
938 } else
939 phydev->state = PHY_NOLINK;
940 phydev->adjust_link(phydev->attached_dev);
932 } else { 941 } else {
933 phydev->state = PHY_AN; 942 phydev->state = PHY_AN;
934 phydev->link_timeout = PHY_AN_TIMEOUT; 943 phydev->link_timeout = PHY_AN_TIMEOUT;
935 } 944 }
936 } else 945 } else {
937 phydev->state = PHY_RUNNING; 946 err = phy_read_status(phydev);
947 if (err)
948 break;
949
950 if (phydev->link) {
951 phydev->state = PHY_RUNNING;
952 netif_carrier_on(phydev->attached_dev);
953 } else
954 phydev->state = PHY_NOLINK;
955 phydev->adjust_link(phydev->attached_dev);
956 }
938 break; 957 break;
939 } 958 }
940 959
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index eba937c46376..b10fedd82143 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -134,8 +134,10 @@ int phy_scan_fixups(struct phy_device *phydev)
134 134
135 err = fixup->run(phydev); 135 err = fixup->run(phydev);
136 136
137 if (err < 0) 137 if (err < 0) {
138 mutex_unlock(&phy_fixup_lock);
138 return err; 139 return err;
140 }
139 } 141 }
140 } 142 }
141 mutex_unlock(&phy_fixup_lock); 143 mutex_unlock(&phy_fixup_lock);
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 7a62f781fef2..2ca8b0d84ee2 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -270,6 +270,9 @@ static const struct net_device_ops plip_netdev_ops = {
270 .ndo_stop = plip_close, 270 .ndo_stop = plip_close,
271 .ndo_start_xmit = plip_tx_packet, 271 .ndo_start_xmit = plip_tx_packet,
272 .ndo_do_ioctl = plip_ioctl, 272 .ndo_do_ioctl = plip_ioctl,
273 .ndo_change_mtu = eth_change_mtu,
274 .ndo_set_mac_address = eth_mac_addr,
275 .ndo_validate_addr = eth_validate_addr,
273}; 276};
274 277
275/* Entry point of PLIP driver. 278/* Entry point of PLIP driver.
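
The plip change above appears to restore the default Ethernet behaviour (MTU bounds checking, MAC address setting, address validation) that was implicit before the driver's net_device_ops conversion, by pointing those hooks at the stock helpers. A hedged sketch of how such an ops table typically looks; the example_* handlers are placeholders, not plip functions:

    static const struct net_device_ops example_netdev_ops = {
            .ndo_open               = example_open,
            .ndo_stop               = example_close,
            .ndo_start_xmit         = example_start_xmit,
            .ndo_change_mtu         = eth_change_mtu,       /* generic Ethernet MTU range check */
            .ndo_set_mac_address    = eth_mac_addr,         /* copy a validated address into dev->dev_addr */
            .ndo_validate_addr      = eth_validate_addr,    /* reject multicast/zero MAC addresses */
    };
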
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 17c116bb332c..6de8399d6dd9 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -356,6 +356,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
356 if (!skb_queue_empty(&ap->rqueue)) 356 if (!skb_queue_empty(&ap->rqueue))
357 tasklet_schedule(&ap->tsk); 357 tasklet_schedule(&ap->tsk);
358 ap_put(ap); 358 ap_put(ap);
359 tty_unthrottle(tty);
359} 360}
360 361
361static void 362static void
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 639d11bc444e..cd37d739ac74 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1384,7 +1384,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1384 1384
1385 /* create a fragment for each channel */ 1385 /* create a fragment for each channel */
1386 bits = B; 1386 bits = B;
1387 while (nfree > 0 && len > 0) { 1387 while (len > 0) {
1388 list = list->next; 1388 list = list->next;
1389 if (list == &ppp->channels) { 1389 if (list == &ppp->channels) {
1390 i = 0; 1390 i = 0;
@@ -1431,29 +1431,31 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1431 *otherwise divide it according to the speed 1431 *otherwise divide it according to the speed
1432 *of the channel we are going to transmit on 1432 *of the channel we are going to transmit on
1433 */ 1433 */
1434 if (pch->speed == 0) { 1434 if (nfree > 0) {
1435 flen = totlen/nfree ; 1435 if (pch->speed == 0) {
1436 if (nbigger > 0) { 1436 flen = totlen/nfree ;
1437 flen++; 1437 if (nbigger > 0) {
1438 nbigger--; 1438 flen++;
1439 } 1439 nbigger--;
1440 } else { 1440 }
1441 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / 1441 } else {
1442 ((totspeed*totfree)/pch->speed)) - hdrlen; 1442 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
1443 if (nbigger > 0) { 1443 ((totspeed*totfree)/pch->speed)) - hdrlen;
1444 flen += ((totfree - nzero)*pch->speed)/totspeed; 1444 if (nbigger > 0) {
1445 nbigger -= ((totfree - nzero)*pch->speed)/ 1445 flen += ((totfree - nzero)*pch->speed)/totspeed;
1446 nbigger -= ((totfree - nzero)*pch->speed)/
1446 totspeed; 1447 totspeed;
1448 }
1447 } 1449 }
1450 nfree--;
1448 } 1451 }
1449 nfree--;
1450 1452
1451 /* 1453 /*
1452 *check if we are on the last channel or 1454 *check if we are on the last channel or
 1453 *we exceeded the length of the data to 1455 *we exceeded the length of the data to
1454 *fragment 1456 *fragment
1455 */ 1457 */
1456 if ((nfree == 0) || (flen > len)) 1458 if ((nfree <= 0) || (flen > len))
1457 flen = len; 1459 flen = len;
1458 /* 1460 /*
1459 *it is not worth to tx on slow channels: 1461 *it is not worth to tx on slow channels:
@@ -1467,7 +1469,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1467 continue; 1469 continue;
1468 } 1470 }
1469 1471
1470 mtu = pch->chan->mtu + 2 - hdrlen; 1472 mtu = pch->chan->mtu - hdrlen;
1471 if (mtu < 4) 1473 if (mtu < 4)
1472 mtu = 4; 1474 mtu = 4;
1473 if (flen > mtu) 1475 if (flen > mtu)
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index aa3d39f38e22..d2fa2db13586 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -397,6 +397,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
397 if (!skb_queue_empty(&ap->rqueue)) 397 if (!skb_queue_empty(&ap->rqueue))
398 tasklet_schedule(&ap->tsk); 398 tasklet_schedule(&ap->tsk);
399 sp_put(ap); 399 sp_put(ap);
400 tty_unthrottle(tty);
400} 401}
401 402
402static void 403static void
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index f0031f1f97e5..5f2090233d7b 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1063,6 +1063,7 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1063 else { 1063 else {
1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
1065 1065
1066 po = NULL;
1066 while (++hash < PPPOE_HASH_SIZE) { 1067 while (++hash < PPPOE_HASH_SIZE) {
1067 po = pn->hash_table[hash]; 1068 po = pn->hash_table[hash];
1068 if (po) 1069 if (po)
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index e7935d09c896..e0f9219a0aea 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2680,6 +2680,7 @@ out_unregister_pppol2tp_proto:
2680static void __exit pppol2tp_exit(void) 2680static void __exit pppol2tp_exit(void)
2681{ 2681{
2682 unregister_pppox_proto(PX_PROTO_OL2TP); 2682 unregister_pppox_proto(PX_PROTO_OL2TP);
2683 unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops);
2683 proto_unregister(&pppol2tp_sk_proto); 2684 proto_unregister(&pppol2tp_sk_proto);
2684} 2685}
2685 2686
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index d1a5fb4d6acb..a3932c9f3406 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -1411,6 +1411,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
1411 .ndo_set_multicast_list = gelic_net_set_multi, 1411 .ndo_set_multicast_list = gelic_net_set_multi,
1412 .ndo_change_mtu = gelic_net_change_mtu, 1412 .ndo_change_mtu = gelic_net_change_mtu,
1413 .ndo_tx_timeout = gelic_net_tx_timeout, 1413 .ndo_tx_timeout = gelic_net_tx_timeout,
1414 .ndo_set_mac_address = eth_mac_addr,
1414 .ndo_validate_addr = eth_validate_addr, 1415 .ndo_validate_addr = eth_validate_addr,
1415#ifdef CONFIG_NET_POLL_CONTROLLER 1416#ifdef CONFIG_NET_POLL_CONTROLLER
1416 .ndo_poll_controller = gelic_net_poll_controller, 1417 .ndo_poll_controller = gelic_net_poll_controller,
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index b6b3ca9bdb21..6932b08d746b 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2707,6 +2707,7 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
2707 .ndo_set_multicast_list = gelic_net_set_multi, 2707 .ndo_set_multicast_list = gelic_net_set_multi,
2708 .ndo_change_mtu = gelic_net_change_mtu, 2708 .ndo_change_mtu = gelic_net_change_mtu,
2709 .ndo_tx_timeout = gelic_net_tx_timeout, 2709 .ndo_tx_timeout = gelic_net_tx_timeout,
2710 .ndo_set_mac_address = eth_mac_addr,
2710 .ndo_validate_addr = eth_validate_addr, 2711 .ndo_validate_addr = eth_validate_addr,
2711#ifdef CONFIG_NET_POLL_CONTROLLER 2712#ifdef CONFIG_NET_POLL_CONTROLLER
2712 .ndo_poll_controller = gelic_net_poll_controller, 2713 .ndo_poll_controller = gelic_net_poll_controller,
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 156e02e8905d..6ed5317ab1c0 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1607,6 +1607,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
1607int ql_cam_route_initialize(struct ql_adapter *qdev); 1607int ql_cam_route_initialize(struct ql_adapter *qdev);
1608int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 1608int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1609int ql_mb_about_fw(struct ql_adapter *qdev); 1609int ql_mb_about_fw(struct ql_adapter *qdev);
1610void ql_link_on(struct ql_adapter *qdev);
1611void ql_link_off(struct ql_adapter *qdev);
1610 1612
1611#if 1 1613#if 1
1612#define QL_ALL_DUMP 1614#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 37c99fe79770..eb6a9ee640ed 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -59,7 +59,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
59 cqicb->pkt_delay = 59 cqicb->pkt_delay =
60 cpu_to_le16(qdev->tx_max_coalesced_frames); 60 cpu_to_le16(qdev->tx_max_coalesced_frames);
61 cqicb->flags = FLAGS_LI; 61 cqicb->flags = FLAGS_LI;
62 status = ql_write_cfg(qdev, cqicb, sizeof(cqicb), 62 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
63 CFG_LCQ, rx_ring->cq_id); 63 CFG_LCQ, rx_ring->cq_id);
64 if (status) { 64 if (status) {
65 QPRINTK(qdev, IFUP, ERR, 65 QPRINTK(qdev, IFUP, ERR,
@@ -82,7 +82,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
82 cqicb->pkt_delay = 82 cqicb->pkt_delay =
83 cpu_to_le16(qdev->rx_max_coalesced_frames); 83 cpu_to_le16(qdev->rx_max_coalesced_frames);
84 cqicb->flags = FLAGS_LI; 84 cqicb->flags = FLAGS_LI;
85 status = ql_write_cfg(qdev, cqicb, sizeof(cqicb), 85 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
86 CFG_LCQ, rx_ring->cq_id); 86 CFG_LCQ, rx_ring->cq_id);
87 if (status) { 87 if (status) {
88 QPRINTK(qdev, IFUP, ERR, 88 QPRINTK(qdev, IFUP, ERR,
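
Editorial note: both hunks above (and several later qlge hunks) fix the same pattern: sizeof applied to a pointer yields the pointer's size, not the size of the control block it points to, so only a handful of bytes were being described to ql_write_cfg() or cleared by memset(). A small userspace demonstration of the difference; the structure name is invented for the example:

#include <stdio.h>

struct icb {                    /* stand-in for the hardware control block */
        unsigned char data[64];
};

int main(void)
{
        struct icb block;
        struct icb *cqicb = &block;

        /* On a typical 64-bit build this prints "8 64": sizeof(cqicb) is
         * the pointer size, sizeof(*cqicb) is the 64-byte block that the
         * driver actually needs to hand to the hardware. */
        printf("%zu %zu\n", sizeof(cqicb), sizeof(*cqicb));
        return 0;
}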
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 90d1f76c0e8b..5768af17f168 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -214,6 +214,10 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
214 return -ENOMEM; 214 return -ENOMEM;
215 } 215 }
216 216
217 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
218 if (status)
219 return status;
220
217 status = ql_wait_cfg(qdev, bit); 221 status = ql_wait_cfg(qdev, bit);
218 if (status) { 222 if (status) {
219 QPRINTK(qdev, IFUP, ERR, 223 QPRINTK(qdev, IFUP, ERR,
@@ -221,12 +225,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
221 goto exit; 225 goto exit;
222 } 226 }
223 227
224 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
225 if (status)
226 goto exit;
227 ql_write32(qdev, ICB_L, (u32) map); 228 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32)); 229 ql_write32(qdev, ICB_H, (u32) (map >> 32));
229 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
230 230
231 mask = CFG_Q_MASK | (bit << 16); 231 mask = CFG_Q_MASK | (bit << 16);
232 value = bit | (q_id << CFG_Q_SHIFT); 232 value = bit | (q_id << CFG_Q_SHIFT);
@@ -237,6 +237,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
237 */ 237 */
238 status = ql_wait_cfg(qdev, bit); 238 status = ql_wait_cfg(qdev, bit);
239exit: 239exit:
240 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
240 pci_unmap_single(qdev->pdev, map, size, direction); 241 pci_unmap_single(qdev->pdev, map, size, direction);
241 return status; 242 return status;
242} 243}
@@ -412,6 +413,57 @@ exit:
412 return status; 413 return status;
413} 414}
414 415
416/* Set or clear MAC address in hardware. We sometimes
417 * have to clear it to prevent wrong frame routing
418 * especially in a bonding environment.
419 */
420static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
421{
422 int status;
423 char zero_mac_addr[ETH_ALEN];
424 char *addr;
425
426 if (set) {
427 addr = &qdev->ndev->dev_addr[0];
428 QPRINTK(qdev, IFUP, DEBUG,
429 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
430 addr[0], addr[1], addr[2], addr[3],
431 addr[4], addr[5]);
432 } else {
433 memset(zero_mac_addr, 0, ETH_ALEN);
434 addr = &zero_mac_addr[0];
435 QPRINTK(qdev, IFUP, DEBUG,
436 "Clearing MAC address on %s\n",
437 qdev->ndev->name);
438 }
439 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
440 if (status)
441 return status;
442 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
443 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
444 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
445 if (status)
446 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
447 "address.\n");
448 return status;
449}
450
451void ql_link_on(struct ql_adapter *qdev)
452{
453 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
454 qdev->ndev->name);
455 netif_carrier_on(qdev->ndev);
456 ql_set_mac_addr(qdev, 1);
457}
458
459void ql_link_off(struct ql_adapter *qdev)
460{
461 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
462 qdev->ndev->name);
463 netif_carrier_off(qdev->ndev);
464 ql_set_mac_addr(qdev, 0);
465}
466
415/* Get a specific frame routing value from the CAM. 467/* Get a specific frame routing value from the CAM.
416 * Used for debug and reg dump. 468 * Used for debug and reg dump.
417 */ 469 */
@@ -1628,7 +1680,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1628 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; 1680 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1629 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; 1681 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1630 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); 1682 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1631 qdev->stats.tx_bytes += tx_ring_desc->map_cnt; 1683 qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1632 qdev->stats.tx_packets++; 1684 qdev->stats.tx_packets++;
1633 dev_kfree_skb(tx_ring_desc->skb); 1685 dev_kfree_skb(tx_ring_desc->skb);
1634 tx_ring_desc->skb = NULL; 1686 tx_ring_desc->skb = NULL;
@@ -1660,13 +1712,13 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1660/* Fire up a handler to reset the MPI processor. */ 1712/* Fire up a handler to reset the MPI processor. */
1661void ql_queue_fw_error(struct ql_adapter *qdev) 1713void ql_queue_fw_error(struct ql_adapter *qdev)
1662{ 1714{
1663 netif_carrier_off(qdev->ndev); 1715 ql_link_off(qdev);
1664 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); 1716 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1665} 1717}
1666 1718
1667void ql_queue_asic_error(struct ql_adapter *qdev) 1719void ql_queue_asic_error(struct ql_adapter *qdev)
1668{ 1720{
1669 netif_carrier_off(qdev->ndev); 1721 ql_link_off(qdev);
1670 ql_disable_interrupts(qdev); 1722 ql_disable_interrupts(qdev);
1671 /* Clear adapter up bit to signal the recovery 1723 /* Clear adapter up bit to signal the recovery
1672 * process that it shouldn't kill the reset worker 1724 * process that it shouldn't kill the reset worker
@@ -2104,7 +2156,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
2104 } 2156 }
2105 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; 2157 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2106 mac_iocb_ptr = tx_ring_desc->queue_entry; 2158 mac_iocb_ptr = tx_ring_desc->queue_entry;
2107 memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr)); 2159 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2108 2160
2109 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; 2161 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2110 mac_iocb_ptr->tid = tx_ring_desc->index; 2162 mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -2743,7 +2795,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2743 2795
2744 ql_init_tx_ring(qdev, tx_ring); 2796 ql_init_tx_ring(qdev, tx_ring);
2745 2797
2746 err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ, 2798 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2747 (u16) tx_ring->wq_id); 2799 (u16) tx_ring->wq_id);
2748 if (err) { 2800 if (err) {
2749 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n"); 2801 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
@@ -3008,7 +3060,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
3008 int i; 3060 int i;
3009 u8 *hash_id = (u8 *) ricb->hash_cq_id; 3061 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3010 3062
3011 memset((void *)ricb, 0, sizeof(ricb)); 3063 memset((void *)ricb, 0, sizeof(*ricb));
3012 3064
3013 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K; 3065 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
3014 ricb->flags = 3066 ricb->flags =
@@ -3030,7 +3082,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
3030 3082
3031 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); 3083 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
3032 3084
3033 status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0); 3085 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3034 if (status) { 3086 if (status) {
3035 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n"); 3087 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3036 return status; 3088 return status;
@@ -3039,25 +3091,40 @@ static int ql_start_rss(struct ql_adapter *qdev)
3039 return status; 3091 return status;
3040} 3092}
3041 3093
3042/* Initialize the frame-to-queue routing. */ 3094static int ql_clear_routing_entries(struct ql_adapter *qdev)
3043static int ql_route_initialize(struct ql_adapter *qdev)
3044{ 3095{
3045 int status = 0; 3096 int i, status = 0;
3046 int i;
3047 3097
3048 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3098 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3049 if (status) 3099 if (status)
3050 return status; 3100 return status;
3051
3052 /* Clear all the entries in the routing table. */ 3101 /* Clear all the entries in the routing table. */
3053 for (i = 0; i < 16; i++) { 3102 for (i = 0; i < 16; i++) {
3054 status = ql_set_routing_reg(qdev, i, 0, 0); 3103 status = ql_set_routing_reg(qdev, i, 0, 0);
3055 if (status) { 3104 if (status) {
3056 QPRINTK(qdev, IFUP, ERR, 3105 QPRINTK(qdev, IFUP, ERR,
3057 "Failed to init routing register for CAM packets.\n"); 3106 "Failed to init routing register for CAM "
3058 goto exit; 3107 "packets.\n");
3108 break;
3059 } 3109 }
3060 } 3110 }
3111 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3112 return status;
3113}
3114
3115/* Initialize the frame-to-queue routing. */
3116static int ql_route_initialize(struct ql_adapter *qdev)
3117{
3118 int status = 0;
3119
3120 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3121 if (status)
3122 return status;
3123
3124 /* Clear all the entries in the routing table. */
3125 status = ql_clear_routing_entries(qdev);
3126 if (status)
3127 goto exit;
3061 3128
3062 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3129 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3063 if (status) { 3130 if (status) {
@@ -3096,14 +3163,15 @@ exit:
3096 3163
3097int ql_cam_route_initialize(struct ql_adapter *qdev) 3164int ql_cam_route_initialize(struct ql_adapter *qdev)
3098{ 3165{
3099 int status; 3166 int status, set;
3100 3167
3101 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 3168 /* If check if the link is up and use to
3102 if (status) 3169 * determine if we are setting or clearing
3103 return status; 3170 * the MAC address in the CAM.
3104 status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr, 3171 */
3105 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); 3172 set = ql_read32(qdev, STS);
3106 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 3173 set &= qdev->port_link_up;
3174 status = ql_set_mac_addr(qdev, set);
3107 if (status) { 3175 if (status) {
3108 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n"); 3176 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3109 return status; 3177 return status;
@@ -3210,9 +3278,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3210{ 3278{
3211 u32 value; 3279 u32 value;
3212 int status = 0; 3280 int status = 0;
3213 unsigned long end_jiffies = jiffies + 3281 unsigned long end_jiffies;
3214 max((unsigned long)1, usecs_to_jiffies(30));
3215 3282
3283 /* Clear all the entries in the routing table. */
3284 status = ql_clear_routing_entries(qdev);
3285 if (status) {
3286 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3287 return status;
3288 }
3289
3290 end_jiffies = jiffies +
3291 max((unsigned long)1, usecs_to_jiffies(30));
3216 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); 3292 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3217 3293
3218 do { 3294 do {
@@ -3252,7 +3328,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3252 int i, status = 0; 3328 int i, status = 0;
3253 struct rx_ring *rx_ring; 3329 struct rx_ring *rx_ring;
3254 3330
3255 netif_carrier_off(qdev->ndev); 3331 ql_link_off(qdev);
3256 3332
3257 /* Don't kill the reset worker thread if we 3333 /* Don't kill the reset worker thread if we
3258 * are in the process of recovery. 3334 * are in the process of recovery.
@@ -3319,8 +3395,12 @@ static int ql_adapter_up(struct ql_adapter *qdev)
3319 } 3395 }
3320 set_bit(QL_ADAPTER_UP, &qdev->flags); 3396 set_bit(QL_ADAPTER_UP, &qdev->flags);
3321 ql_alloc_rx_buffers(qdev); 3397 ql_alloc_rx_buffers(qdev);
3322 if ((ql_read32(qdev, STS) & qdev->port_init)) 3398 /* If the port is initialized and the
3323 netif_carrier_on(qdev->ndev); 3399 * link is up the turn on the carrier.
3400 */
3401 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3402 (ql_read32(qdev, STS) & qdev->port_link_up))
3403 ql_link_on(qdev);
3324 ql_enable_interrupts(qdev); 3404 ql_enable_interrupts(qdev);
3325 ql_enable_all_completion_interrupts(qdev); 3405 ql_enable_all_completion_interrupts(qdev);
3326 netif_tx_start_all_queues(qdev->ndev); 3406 netif_tx_start_all_queues(qdev->ndev);
@@ -3346,11 +3426,6 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
3346 return -ENOMEM; 3426 return -ENOMEM;
3347 } 3427 }
3348 status = ql_request_irq(qdev); 3428 status = ql_request_irq(qdev);
3349 if (status)
3350 goto err_irq;
3351 return status;
3352err_irq:
3353 ql_free_mem_resources(qdev);
3354 return status; 3429 return status;
3355} 3430}
3356 3431
@@ -3414,7 +3489,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3414 3489
3415 for (i = 0; i < qdev->tx_ring_count; i++) { 3490 for (i = 0; i < qdev->tx_ring_count; i++) {
3416 tx_ring = &qdev->tx_ring[i]; 3491 tx_ring = &qdev->tx_ring[i];
3417 memset((void *)tx_ring, 0, sizeof(tx_ring)); 3492 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3418 tx_ring->qdev = qdev; 3493 tx_ring->qdev = qdev;
3419 tx_ring->wq_id = i; 3494 tx_ring->wq_id = i;
3420 tx_ring->wq_len = qdev->tx_ring_size; 3495 tx_ring->wq_len = qdev->tx_ring_size;
@@ -3430,7 +3505,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3430 3505
3431 for (i = 0; i < qdev->rx_ring_count; i++) { 3506 for (i = 0; i < qdev->rx_ring_count; i++) {
3432 rx_ring = &qdev->rx_ring[i]; 3507 rx_ring = &qdev->rx_ring[i];
3433 memset((void *)rx_ring, 0, sizeof(rx_ring)); 3508 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3434 rx_ring->qdev = qdev; 3509 rx_ring->qdev = qdev;
3435 rx_ring->cq_id = i; 3510 rx_ring->cq_id = i;
3436 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ 3511 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
@@ -3789,7 +3864,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3789 int pos, err = 0; 3864 int pos, err = 0;
3790 u16 val16; 3865 u16 val16;
3791 3866
3792 memset((void *)qdev, 0, sizeof(qdev)); 3867 memset((void *)qdev, 0, sizeof(*qdev));
3793 err = pci_enable_device(pdev); 3868 err = pci_enable_device(pdev);
3794 if (err) { 3869 if (err) {
3795 dev_err(&pdev->dev, "PCI device enable failed.\n"); 3870 dev_err(&pdev->dev, "PCI device enable failed.\n");
@@ -3976,7 +4051,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
3976 pci_disable_device(pdev); 4051 pci_disable_device(pdev);
3977 return err; 4052 return err;
3978 } 4053 }
3979 netif_carrier_off(ndev); 4054 ql_link_off(qdev);
3980 ql_display_dev_info(ndev); 4055 ql_display_dev_info(ndev);
3981 cards_found++; 4056 cards_found++;
3982 return 0; 4057 return 0;
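
Editorial note: the qlge_main.c changes above funnel every carrier transition through ql_link_on()/ql_link_off(), which call ql_set_mac_addr() to either program the port's MAC into the CAM or overwrite it with zeros, so a down port cannot attract frames (the bonding case mentioned in the new comment). A reduced sketch of that shape, with simplified stand-in types and helpers rather than the real struct ql_adapter API:

/* Stand-ins for the real adapter object and its helpers; only the
 * control flow mirrors the patch. */
struct adapter;
extern int  cam_write_mac(struct adapter *a, const unsigned char *mac);
extern void carrier_on(struct adapter *a);
extern void carrier_off(struct adapter *a);
extern const unsigned char *port_mac(struct adapter *a);

static const unsigned char zero_mac[6];         /* all zeros */

static void link_on(struct adapter *a)
{
        carrier_on(a);
        cam_write_mac(a, port_mac(a));  /* claim frames for this port */
}

static void link_off(struct adapter *a)
{
        carrier_off(a);
        cam_write_mac(a, zero_mac);     /* stop routing frames here */
}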
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 71afbf8b9c50..6685bd97da91 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -238,7 +238,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
238 &qdev->mpi_port_cfg_work, 0); 238 &qdev->mpi_port_cfg_work, 0);
239 } 239 }
240 240
241 netif_carrier_on(qdev->ndev); 241 ql_link_on(qdev);
242} 242}
243 243
244static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) 244static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -251,7 +251,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
251 if (status) 251 if (status)
252 QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n"); 252 QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
253 253
254 netif_carrier_off(qdev->ndev); 254 ql_link_off(qdev);
255} 255}
256 256
257static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) 257static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -849,7 +849,7 @@ void ql_mpi_idc_work(struct work_struct *work)
849 case MB_CMD_PORT_RESET: 849 case MB_CMD_PORT_RESET:
850 case MB_CMD_SET_PORT_CFG: 850 case MB_CMD_SET_PORT_CFG:
851 case MB_CMD_STOP_FW: 851 case MB_CMD_STOP_FW:
852 netif_carrier_off(qdev->ndev); 852 ql_link_off(qdev);
853 /* Signal the resulting link up AEN 853 /* Signal the resulting link up AEN
854 * that the frame routing and mac addr 854 * that the frame routing and mac addr
855 * needs to be set. 855 * needs to be set.
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index ed63d23a6452..961b5397a531 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
49#include <asm/processor.h> 49#include <asm/processor.h>
50 50
51#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
52#define DRV_VERSION "0.23" 52#define DRV_VERSION "0.24"
53#define DRV_RELDATE "05May2009" 53#define DRV_RELDATE "08Jul2009"
54 54
55/* PHY CHIP Address */ 55/* PHY CHIP Address */
56#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -704,8 +704,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
704 /* Read MISR status and clear */ 704 /* Read MISR status and clear */
705 status = ioread16(ioaddr + MISR); 705 status = ioread16(ioaddr + MISR);
706 706
707 if (status == 0x0000 || status == 0xffff) 707 if (status == 0x0000 || status == 0xffff) {
708 /* Restore RDC MAC interrupt */
709 iowrite16(misr, ioaddr + MIER);
708 return IRQ_NONE; 710 return IRQ_NONE;
711 }
709 712
710 /* RX interrupt request */ 713 /* RX interrupt request */
711 if (status & RX_INTS) { 714 if (status & RX_INTS) {
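
Editorial note: the r6040 hunk above addresses a shared-IRQ detail: the handler has already masked the chip's interrupt sources by the time it inspects the status, so bailing out with IRQ_NONE for a foreign interrupt must first restore the mask register or the NIC stays silenced. A compressed sketch of that ordering; the register offsets, saved-mask parameter and function signature are illustrative, not the real r6040 layout:

#include <linux/io.h>
#include <linux/interrupt.h>

#define MIER_OFF        0x40    /* illustrative offsets only */
#define MISR_OFF        0x44

static irqreturn_t isr_sketch(void __iomem *ioaddr, u16 saved_mier)
{
        u16 status;

        iowrite16(0, ioaddr + MIER_OFF);        /* mask while we look */
        status = ioread16(ioaddr + MISR_OFF);   /* read-to-clear status */

        if (status == 0x0000 || status == 0xffff) {
                /* not ours (shared line) - re-arm before giving up */
                iowrite16(saved_mier, ioaddr + MIER_OFF);
                return IRQ_NONE;
        }

        /* ... service RX/TX events here ... */

        iowrite16(saved_mier, ioaddr + MIER_OFF);       /* re-arm */
        return IRQ_HANDLED;
}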
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4b53b58d75fc..b82780d805f5 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2060,8 +2060,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2060 } 2060 }
2061 } 2061 }
2062 2062
2063 pci_set_master(pdev);
2064
2065 /* ioremap MMIO region */ 2063 /* ioremap MMIO region */
2066 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); 2064 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
2067 if (!ioaddr) { 2065 if (!ioaddr) {
@@ -2089,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2089 2087
2090 RTL_W16(IntrStatus, 0xffff); 2088 RTL_W16(IntrStatus, 0xffff);
2091 2089
2090 pci_set_master(pdev);
2091
2092 /* Identify chip attached to board */ 2092 /* Identify chip attached to board */
2093 rtl8169_get_mac_version(tp, ioaddr); 2093 rtl8169_get_mac_version(tp, ioaddr);
2094 2094
@@ -3874,6 +3874,15 @@ static void rtl_shutdown(struct pci_dev *pdev)
3874 spin_unlock_irq(&tp->lock); 3874 spin_unlock_irq(&tp->lock);
3875 3875
3876 if (system_state == SYSTEM_POWER_OFF) { 3876 if (system_state == SYSTEM_POWER_OFF) {
3877 /* WoL fails with some 8168 when the receiver is disabled. */
3878 if (tp->features & RTL_FEATURE_WOL) {
3879 pci_clear_master(pdev);
3880
3881 RTL_W8(ChipCmd, CmdRxEnb);
3882 /* PCI commit */
3883 RTL_R8(ChipCmd);
3884 }
3885
3877 pci_wake_from_d3(pdev, true); 3886 pci_wake_from_d3(pdev, true);
3878 pci_set_power_state(pdev, PCI_D3hot); 3887 pci_set_power_state(pdev, PCI_D3hot);
3879 } 3888 }
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 5345e47b35ac..4525cbe8dd69 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -793,7 +793,7 @@ static inline int s6gmac_phy_start(struct net_device *dev)
793 struct s6gmac *pd = netdev_priv(dev); 793 struct s6gmac *pd = netdev_priv(dev);
794 int i = 0; 794 int i = 0;
795 struct phy_device *p = NULL; 795 struct phy_device *p = NULL;
796 while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) 796 while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
797 i++; 797 i++;
798 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, 798 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
799 PHY_INTERFACE_MODE_RGMII); 799 PHY_INTERFACE_MODE_RGMII);
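
Editorial note: the one-line s6gmac change above (and the matching de4x5 and ibmtr loops later in this series) reorders the && operands so the index bound is tested before the array element is read; with the old order the loop dereferenced phy_map[PHY_MAX_ADDR] once before terminating. A tiny self-contained version of the corrected loop, with a placeholder array name and size:

#define MAP_MAX 4       /* placeholder for PHY_MAX_ADDR and friends */

/* Return the first non-NULL entry, or NULL.  The bound check comes
 * first, so && short-circuits before map[MAP_MAX] could be touched. */
static void *first_nonnull(void *map[MAP_MAX])
{
        void *p = NULL;
        int i = 0;

        while ((i < MAP_MAX) && (!(p = map[i])))
                i++;
        return p;
}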
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 18821f217e19..e3156c97bb58 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1593,6 +1593,7 @@ out:
1593static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { 1593static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, 1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
1595 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, 1595 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
1596 { PCI_DEVICE(0x1088, 0x2031) },
1596 { 0, } 1597 { 0, }
1597}; 1598};
1598MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); 1599MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 341882f959f3..a2d82ddb3b4d 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
865 struct sh_eth_private *mdp = netdev_priv(ndev); 865 struct sh_eth_private *mdp = netdev_priv(ndev);
866 struct sh_eth_cpu_data *cd = mdp->cd; 866 struct sh_eth_cpu_data *cd = mdp->cd;
867 irqreturn_t ret = IRQ_NONE; 867 irqreturn_t ret = IRQ_NONE;
868 u32 ioaddr, boguscnt = RX_RING_SIZE; 868 u32 ioaddr, intr_status = 0;
869 u32 intr_status = 0;
870 869
871 ioaddr = ndev->base_addr; 870 ioaddr = ndev->base_addr;
872 spin_lock(&mdp->lock); 871 spin_lock(&mdp->lock);
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
901 if (intr_status & cd->eesr_err_check) 900 if (intr_status & cd->eesr_err_check)
902 sh_eth_error(ndev, intr_status); 901 sh_eth_error(ndev, intr_status);
903 902
904 if (--boguscnt < 0) {
905 printk(KERN_WARNING
906 "%s: Too much work at interrupt, status=0x%4.4x.\n",
907 ndev->name, intr_status);
908 }
909
910other_irq: 903other_irq:
911 spin_unlock(&mdp->lock); 904 spin_unlock(&mdp->lock);
912 905
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 60d502eef4fc..543af2044f40 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3854,8 +3854,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3854 skge->speed = -1; 3854 skge->speed = -1;
3855 skge->advertising = skge_supported_modes(hw); 3855 skge->advertising = skge_supported_modes(hw);
3856 3856
3857 if (device_may_wakeup(&hw->pdev->dev)) 3857 if (device_can_wakeup(&hw->pdev->dev)) {
3858 skge->wol = wol_supported(hw) & WAKE_MAGIC; 3858 skge->wol = wol_supported(hw) & WAKE_MAGIC;
3859 device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
3860 }
3859 3861
3860 hw->dev[port] = dev; 3862 hw->dev[port] = dev;
3861 3863
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7681d28c53d7..0a551d8f5d95 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1151,14 +1151,7 @@ stopped:
1151 1151
1152 /* reset the Rx prefetch unit */ 1152 /* reset the Rx prefetch unit */
1153 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1153 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1154 1154 mmiowb();
1155 /* Reset the RAM Buffer receive queue */
1156 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
1157
1158 /* Reset Rx MAC FIFO */
1159 sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET);
1160
1161 sky2_read8(hw, B0_CTST);
1162} 1155}
1163 1156
1164/* Clean out receive buffer area, assumes receiver hardware stopped */ 1157/* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1495,6 +1488,8 @@ static int sky2_up(struct net_device *dev)
1495 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1488 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1496#endif 1489#endif
1497 1490
1491 sky2->restarting = 0;
1492
1498 err = sky2_rx_start(sky2); 1493 err = sky2_rx_start(sky2);
1499 if (err) 1494 if (err)
1500 goto err_out; 1495 goto err_out;
@@ -1507,6 +1502,9 @@ static int sky2_up(struct net_device *dev)
1507 1502
1508 sky2_set_multicast(dev); 1503 sky2_set_multicast(dev);
1509 1504
1505 /* wake queue incase we are restarting */
1506 netif_wake_queue(dev);
1507
1510 if (netif_msg_ifup(sky2)) 1508 if (netif_msg_ifup(sky2))
1511 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 1509 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1512 return 0; 1510 return 0;
@@ -1540,6 +1538,8 @@ static inline int tx_dist(unsigned tail, unsigned head)
1540/* Number of list elements available for next tx */ 1538/* Number of list elements available for next tx */
1541static inline int tx_avail(const struct sky2_port *sky2) 1539static inline int tx_avail(const struct sky2_port *sky2)
1542{ 1540{
1541 if (unlikely(sky2->restarting))
1542 return 0;
1543 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); 1543 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1544} 1544}
1545 1545
@@ -1825,11 +1825,9 @@ static int sky2_down(struct net_device *dev)
1825 if (netif_msg_ifdown(sky2)) 1825 if (netif_msg_ifdown(sky2))
1826 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 1826 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1827 1827
1828 /* Disable port IRQ */ 1828 /* explicitly shut off tx incase we're restarting */
1829 imask = sky2_read32(hw, B0_IMSK); 1829 sky2->restarting = 1;
1830 imask &= ~portirq_msk[port]; 1830 netif_tx_disable(dev);
1831 sky2_write32(hw, B0_IMSK, imask);
1832 sky2_read32(hw, B0_IMSK);
1833 1831
1834 /* Force flow control off */ 1832 /* Force flow control off */
1835 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1833 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -1870,8 +1868,6 @@ static int sky2_down(struct net_device *dev)
1870 1868
1871 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); 1869 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1872 1870
1873 sky2_rx_stop(sky2);
1874
1875 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 1871 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1876 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 1872 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1877 1873
@@ -1881,6 +1877,14 @@ static int sky2_down(struct net_device *dev)
1881 sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); 1877 sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
1882 sky2_read8(hw, STAT_ISR_TIMER_CTRL); 1878 sky2_read8(hw, STAT_ISR_TIMER_CTRL);
1883 1879
1880 sky2_rx_stop(sky2);
1881
1882 /* Disable port IRQ */
1883 imask = sky2_read32(hw, B0_IMSK);
1884 imask &= ~portirq_msk[port];
1885 sky2_write32(hw, B0_IMSK, imask);
1886 sky2_read32(hw, B0_IMSK);
1887
1884 synchronize_irq(hw->pdev->irq); 1888 synchronize_irq(hw->pdev->irq);
1885 napi_synchronize(&hw->napi); 1889 napi_synchronize(&hw->napi);
1886 1890
@@ -2366,7 +2370,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2366{ 2370{
2367 struct sky2_port *sky2 = netdev_priv(dev); 2371 struct sky2_port *sky2 = netdev_priv(dev);
2368 2372
2369 if (netif_running(dev)) { 2373 if (likely(netif_running(dev) && !sky2->restarting)) {
2370 netif_tx_lock(dev); 2374 netif_tx_lock(dev);
2371 sky2_tx_complete(sky2, last); 2375 sky2_tx_complete(sky2, last);
2372 netif_tx_unlock(dev); 2376 netif_tx_unlock(dev);
@@ -2495,7 +2499,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2495 if (likely(status >> 16 == (status & 0xffff))) { 2499 if (likely(status >> 16 == (status & 0xffff))) {
2496 skb = sky2->rx_ring[sky2->rx_next].skb; 2500 skb = sky2->rx_ring[sky2->rx_next].skb;
2497 skb->ip_summed = CHECKSUM_COMPLETE; 2501 skb->ip_summed = CHECKSUM_COMPLETE;
2498 skb->csum = status & 0xffff; 2502 skb->csum = le16_to_cpu(status);
2499 } else { 2503 } else {
2500 printk(KERN_NOTICE PFX "%s: hardware receive " 2504 printk(KERN_NOTICE PFX "%s: hardware receive "
2501 "checksum problem (status = %#x)\n", 2505 "checksum problem (status = %#x)\n",
@@ -4290,6 +4294,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4290 spin_lock_init(&sky2->phy_lock); 4294 spin_lock_init(&sky2->phy_lock);
4291 sky2->tx_pending = TX_DEF_PENDING; 4295 sky2->tx_pending = TX_DEF_PENDING;
4292 sky2->rx_pending = RX_DEF_PENDING; 4296 sky2->rx_pending = RX_DEF_PENDING;
4297 sky2->restarting = 0;
4293 4298
4294 hw->dev[port] = dev; 4299 hw->dev[port] = dev;
4295 4300
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b5549c9e5107..4486b066b43f 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2051,6 +2051,7 @@ struct sky2_port {
2051 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 2051 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
2052 u8 rx_csum; 2052 u8 rx_csum;
2053 u8 wol; 2053 u8 wol;
2054 u8 restarting;
2054 enum flow_control flow_mode; 2055 enum flow_control flow_mode;
2055 enum flow_control flow_status; 2056 enum flow_control flow_status;
2056 2057
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index fdcbaf8dfa73..1c70e999cc50 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1774,6 +1774,7 @@ static const struct net_device_ops smc_netdev_ops = {
1774 .ndo_start_xmit = smc_hard_start_xmit, 1774 .ndo_start_xmit = smc_hard_start_xmit,
1775 .ndo_tx_timeout = smc_timeout, 1775 .ndo_tx_timeout = smc_timeout,
1776 .ndo_set_multicast_list = smc_set_multicast_list, 1776 .ndo_set_multicast_list = smc_set_multicast_list,
1777 .ndo_change_mtu = eth_change_mtu,
1777 .ndo_validate_addr = eth_validate_addr, 1778 .ndo_validate_addr = eth_validate_addr,
1778 .ndo_set_mac_address = eth_mac_addr, 1779 .ndo_set_mac_address = eth_mac_addr,
1779#ifdef CONFIG_NET_POLL_CONTROLLER 1780#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index f1f773b17fe1..57a159fac99f 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -186,7 +186,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
186#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) 186#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
187#define SMC_IRQ_FLAGS (-1) /* from resource */ 187#define SMC_IRQ_FLAGS (-1) /* from resource */
188 188
189#elif defined(CONFIG_MACH_LOGICPD_PXA270) 189#elif defined(CONFIG_MACH_LOGICPD_PXA270) \
190 || defined(CONFIG_MACH_NOMADIK_8815NHK)
190 191
191#define SMC_CAN_USE_8BIT 0 192#define SMC_CAN_USE_8BIT 0
192#define SMC_CAN_USE_16BIT 1 193#define SMC_CAN_USE_16BIT 1
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index b60639bd181b..94b6d2658ddc 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1779,6 +1779,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
1779 .ndo_get_stats = smsc911x_get_stats, 1779 .ndo_get_stats = smsc911x_get_stats,
1780 .ndo_set_multicast_list = smsc911x_set_multicast_list, 1780 .ndo_set_multicast_list = smsc911x_set_multicast_list,
1781 .ndo_do_ioctl = smsc911x_do_ioctl, 1781 .ndo_do_ioctl = smsc911x_do_ioctl,
1782 .ndo_change_mtu = eth_change_mtu,
1782 .ndo_validate_addr = eth_validate_addr, 1783 .ndo_validate_addr = eth_validate_addr,
1783 .ndo_set_mac_address = smsc911x_set_mac_address, 1784 .ndo_set_mac_address = smsc911x_set_mac_address,
1784#ifdef CONFIG_NET_POLL_CONTROLLER 1785#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1938,7 +1939,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
1938 if (!res) 1939 if (!res)
1939 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1940 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1940 1941
1941 release_mem_region(res->start, res->end - res->start); 1942 release_mem_region(res->start, resource_size(res));
1942 1943
1943 iounmap(pdata->ioaddr); 1944 iounmap(pdata->ioaddr);
1944 1945
@@ -1976,7 +1977,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1976 retval = -ENODEV; 1977 retval = -ENODEV;
1977 goto out_0; 1978 goto out_0;
1978 } 1979 }
1979 res_size = res->end - res->start + 1; 1980 res_size = resource_size(res);
1980 1981
1981 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1982 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1982 if (!irq_res) { 1983 if (!irq_res) {
@@ -2104,7 +2105,7 @@ out_unmap_io_3:
2104out_free_netdev_2: 2105out_free_netdev_2:
2105 free_netdev(dev); 2106 free_netdev(dev);
2106out_release_io_1: 2107out_release_io_1:
2107 release_mem_region(res->start, res->end - res->start); 2108 release_mem_region(res->start, resource_size(res));
2108out_0: 2109out_0:
2109 return retval; 2110 return retval;
2110} 2111}
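
Editorial note: the smsc911x hunks above replace open-coded res->end - res->start arithmetic with resource_size(). Kernel resources are inclusive ranges, so the bare subtraction is one byte short, both for the requested region size and for the later release. A quick userspace illustration of the off-by-one; the struct and helper names mimic, but are not, the kernel's:

#include <stdio.h>

struct res { unsigned long start, end; };       /* inclusive range */

static unsigned long resource_size_like(const struct res *r)
{
        return r->end - r->start + 1;
}

int main(void)
{
        struct res r = { 0x10000000UL, 0x10000fffUL };  /* a 4 KiB window */

        printf("end - start     = %lu\n", r.end - r.start);         /* 4095 */
        printf("resource_size() = %lu\n", resource_size_like(&r));  /* 4096 */
        return 0;
}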
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 838cce8b8fff..669253c7bd41 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -180,7 +180,7 @@ static int full_duplex[MAX_UNITS] = {0, };
180/* These identify the driver base version and may not be removed. */ 180/* These identify the driver base version and may not be removed. */
181static const char version[] __devinitconst = 181static const char version[] __devinitconst =
182KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n" 182KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
183KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; 183" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
184 184
185MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 185MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
186MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver"); 186MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 545f81b34ad7..d1521c3875b2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1698,13 +1698,13 @@ static int netdev_close(struct net_device *dev)
1698 1698
1699#ifdef __i386__ 1699#ifdef __i386__
1700 if (netif_msg_hw(np)) { 1700 if (netif_msg_hw(np)) {
1701 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", 1701 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1702 (int)(np->tx_ring_dma)); 1702 (int)(np->tx_ring_dma));
1703 for (i = 0; i < TX_RING_SIZE; i++) 1703 for (i = 0; i < TX_RING_SIZE; i++)
1704 printk(" #%d desc. %4.4x %8.8x %8.8x.\n", 1704 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1705 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr, 1705 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1706 np->tx_ring[i].frag[0].length); 1706 np->tx_ring[i].frag[0].length);
1707 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", 1707 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1708 (int)(np->rx_ring_dma)); 1708 (int)(np->rx_ring_dma));
1709 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { 1709 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1710 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", 1710 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index a82fb2aca4cb..f1e5e4542c2a 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1016,7 +1016,9 @@ static const struct net_device_ops vnet_ops = {
1016 .ndo_open = vnet_open, 1016 .ndo_open = vnet_open,
1017 .ndo_stop = vnet_close, 1017 .ndo_stop = vnet_close,
1018 .ndo_set_multicast_list = vnet_set_rx_mode, 1018 .ndo_set_multicast_list = vnet_set_rx_mode,
1019 .ndo_change_mtu = eth_change_mtu,
1019 .ndo_set_mac_address = vnet_set_mac_addr, 1020 .ndo_set_mac_address = vnet_set_mac_addr,
1021 .ndo_validate_addr = eth_validate_addr,
1020 .ndo_tx_timeout = vnet_tx_timeout, 1022 .ndo_tx_timeout = vnet_tx_timeout,
1021 .ndo_change_mtu = vnet_change_mtu, 1023 .ndo_change_mtu = vnet_change_mtu,
1022 .ndo_start_xmit = vnet_start_xmit, 1024 .ndo_start_xmit = vnet_start_xmit,
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 9d896116cf76..08a6c41c1599 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -1912,7 +1912,7 @@ static int __init ibmtr_init(void)
1912 1912
1913 find_turbo_adapters(io); 1913 find_turbo_adapters(io);
1914 1914
1915 for (i = 0; io[i] && (i < IBMTR_MAX_ADAPTERS); i++) { 1915 for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) {
1916 struct net_device *dev; 1916 struct net_device *dev;
1917 irq[i] = 0; 1917 irq[i] = 0;
1918 mem[i] = 0; 1918 mem[i] = 0;
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 0f78f99f9b20..7030bd5e9848 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1132,7 +1132,9 @@ static int tsi108_get_mac(struct net_device *dev)
1132 } 1132 }
1133 1133
1134 if (!is_valid_ether_addr(dev->dev_addr)) { 1134 if (!is_valid_ether_addr(dev->dev_addr)) {
1135 printk("KERN_ERR: word1: %08x, word2: %08x\n", word1, word2); 1135 printk(KERN_ERR
1136 "%s: Invalid MAC address. word1: %08x, word2: %08x\n",
1137 dev->name, word1, word2);
1136 return -EINVAL; 1138 return -EINVAL;
1137 } 1139 }
1138 1140
@@ -1201,8 +1203,8 @@ static void tsi108_set_rx_mode(struct net_device *dev)
1201 __set_bit(hash, &data->mc_hash[0]); 1203 __set_bit(hash, &data->mc_hash[0]);
1202 } else { 1204 } else {
1203 printk(KERN_ERR 1205 printk(KERN_ERR
1204 "%s: got multicast address of length %d " 1206 "%s: got multicast address of length %d instead of 6.\n",
1205 "instead of 6.\n", dev->name, 1207 dev->name,
1206 mc->dmi_addrlen); 1208 mc->dmi_addrlen);
1207 } 1209 }
1208 1210
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 81f054dbb88d..ef49744a5085 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -944,9 +944,10 @@ static void de_set_media (struct de_private *de)
944 macmode &= ~FullDuplex; 944 macmode &= ~FullDuplex;
945 945
946 if (netif_msg_link(de)) { 946 if (netif_msg_link(de)) {
947 printk(KERN_INFO "%s: set link %s\n" 947 printk(KERN_INFO
948 KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n" 948 "%s: set link %s\n"
949 KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", 949 "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
950 "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
950 de->dev->name, media_name[media], 951 de->dev->name, media_name[media],
951 de->dev->name, dr32(MacMode), dr32(SIAStatus), 952 de->dev->name, dr32(MacMode), dr32(SIAStatus),
952 dr32(CSR13), dr32(CSR14), dr32(CSR15), 953 dr32(CSR13), dr32(CSR14), dr32(CSR15),
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index eb72d2e9ab3d..acfdccd44567 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -5059,7 +5059,7 @@ mii_get_phy(struct net_device *dev)
5059 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ 5059 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
5060 for (j=0; j<limit; j++) { /* Search PHY table */ 5060 for (j=0; j<limit; j++) { /* Search PHY table */
5061 if (id != phy_info[j].id) continue; /* ID match? */ 5061 if (id != phy_info[j].id) continue; /* ID match? */
5062 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); 5062 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5063 if (k < DE4X5_MAX_PHY) { 5063 if (k < DE4X5_MAX_PHY) {
5064 memcpy((char *)&lp->phy[k], 5064 memcpy((char *)&lp->phy[k],
5065 (char *)&phy_info[j], sizeof(struct phy_table)); 5065 (char *)&phy_info[j], sizeof(struct phy_table));
@@ -5072,7 +5072,7 @@ mii_get_phy(struct net_device *dev)
5072 break; 5072 break;
5073 } 5073 }
5074 if ((j == limit) && (i < DE4X5_MAX_MII)) { 5074 if ((j == limit) && (i < DE4X5_MAX_MII)) {
5075 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); 5075 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5076 lp->phy[k].addr = i; 5076 lp->phy[k].addr = i;
5077 lp->phy[k].id = id; 5077 lp->phy[k].id = id;
5078 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ 5078 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
@@ -5091,7 +5091,7 @@ mii_get_phy(struct net_device *dev)
5091 purgatory: 5091 purgatory:
5092 lp->active = 0; 5092 lp->active = 0;
5093 if (lp->phy[0].id) { /* Reset the PHY devices */ 5093 if (lp->phy[0].id) { /* Reset the PHY devices */
5094 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ 5094 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
5095 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); 5095 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5096 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); 5096 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5097 5097
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 2abb5d3becc6..99a63649f4fc 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -570,16 +570,18 @@ static void tulip_tx_timeout(struct net_device *dev)
570 (unsigned int)tp->rx_ring[i].buffer2, 570 (unsigned int)tp->rx_ring[i].buffer2,
571 buf[0], buf[1], buf[2]); 571 buf[0], buf[1], buf[2]);
572 for (j = 0; buf[j] != 0xee && j < 1600; j++) 572 for (j = 0; buf[j] != 0xee && j < 1600; j++)
573 if (j < 100) printk(" %2.2x", buf[j]); 573 if (j < 100)
574 printk(" j=%d.\n", j); 574 printk(KERN_CONT " %2.2x", buf[j]);
575 printk(KERN_CONT " j=%d.\n", j);
575 } 576 }
576 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring); 577 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
577 for (i = 0; i < RX_RING_SIZE; i++) 578 for (i = 0; i < RX_RING_SIZE; i++)
578 printk(" %8.8x", (unsigned int)tp->rx_ring[i].status); 579 printk(KERN_CONT " %8.8x",
579 printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring); 580 (unsigned int)tp->rx_ring[i].status);
581 printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
580 for (i = 0; i < TX_RING_SIZE; i++) 582 for (i = 0; i < TX_RING_SIZE; i++)
581 printk(" %8.8x", (unsigned int)tp->tx_ring[i].status); 583 printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status);
582 printk("\n"); 584 printk(KERN_CONT "\n");
583 } 585 }
584#endif 586#endif
585 587
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 842b1a2c40d4..0f15773dae52 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -142,7 +142,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
142static const char version[] __initconst = 142static const char version[] __initconst =
143 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " 143 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) "
144 DRV_RELDATE " Donald Becker <becker@scyld.com>\n" 144 DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
145 KERN_INFO " http://www.scyld.com/network/drivers.html\n"; 145 " http://www.scyld.com/network/drivers.html\n";
146 146
147MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 147MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
148MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver"); 148MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
@@ -939,7 +939,7 @@ static void tx_timeout(struct net_device *dev)
939 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); 939 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
940 for (i = 0; i < RX_RING_SIZE; i++) 940 for (i = 0; i < RX_RING_SIZE; i++)
941 printk(" %8.8x", (unsigned int)np->rx_ring[i].status); 941 printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
942 printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring); 942 printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring);
943 for (i = 0; i < TX_RING_SIZE; i++) 943 for (i = 0; i < TX_RING_SIZE; i++)
944 printk(" %8.8x", np->tx_ring[i].status); 944 printk(" %8.8x", np->tx_ring[i].status);
945 printk("\n"); 945 printk("\n");
@@ -1520,7 +1520,7 @@ static int netdev_close(struct net_device *dev)
1520 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n", 1520 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
1521 i, np->tx_ring[i].length, 1521 i, np->tx_ring[i].length,
1522 np->tx_ring[i].status, np->tx_ring[i].buffer1); 1522 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1523 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", 1523 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1524 (int)np->rx_ring); 1524 (int)np->rx_ring);
1525 for (i = 0; i < RX_RING_SIZE; i++) { 1525 for (i = 0; i < RX_RING_SIZE; i++) {
1526 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", 1526 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
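
Editorial note: the starfire, sundance, de2104x, tulip_core and winbond-840 hunks above all repair the same printk misuse: a KERN_* marker is only meaningful at the very start of a printk() format, so strings like "\n" KERN_DEBUG "..." emit the level prefix as literal text. Continuations of an already-started line use KERN_CONT, and each fresh line gets its own printk() with its own level. A condensed kernel-style fragment of the corrected pattern; the function and its arguments are invented for the example:

#include <linux/kernel.h>
#include <linux/types.h>

static void dump_ring_status(const char *name, const u32 *status, int n)
{
        int i;

        printk(KERN_DEBUG "%s ring:", name);
        for (i = 0; i < n; i++)
                printk(KERN_CONT " %8.8x", status[i]);  /* same line */
        printk(KERN_CONT "\n");                         /* finish the line */
}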
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 11a0ba47b677..027f7aba26af 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -486,12 +486,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
486{ 486{
487 struct tun_file *tfile = file->private_data; 487 struct tun_file *tfile = file->private_data;
488 struct tun_struct *tun = __tun_get(tfile); 488 struct tun_struct *tun = __tun_get(tfile);
489 struct sock *sk = tun->sk; 489 struct sock *sk;
490 unsigned int mask = 0; 490 unsigned int mask = 0;
491 491
492 if (!tun) 492 if (!tun)
493 return POLLERR; 493 return POLLERR;
494 494
495 sk = tun->sk;
496
495 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 497 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
496 498
497 poll_wait(file, &tun->socket.wait, wait); 499 poll_wait(file, &tun->socket.wait, wait);
@@ -1324,20 +1326,22 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1324 struct tun_file *tfile = file->private_data; 1326 struct tun_file *tfile = file->private_data;
1325 struct tun_struct *tun; 1327 struct tun_struct *tun;
1326 1328
1327
1328 rtnl_lock();
1329 tun = __tun_get(tfile); 1329 tun = __tun_get(tfile);
1330 if (tun) { 1330 if (tun) {
1331 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); 1331 struct net_device *dev = tun->dev;
1332
1333 DBG(KERN_INFO "%s: tun_chr_close\n", dev->name);
1332 1334
1333 __tun_detach(tun); 1335 __tun_detach(tun);
1334 1336
1335 /* If desireable, unregister the netdevice. */ 1337 /* If desireable, unregister the netdevice. */
1336 if (!(tun->flags & TUN_PERSIST)) 1338 if (!(tun->flags & TUN_PERSIST)) {
1337 unregister_netdevice(tun->dev); 1339 rtnl_lock();
1338 1340 if (dev->reg_state == NETREG_REGISTERED)
1341 unregister_netdevice(dev);
1342 rtnl_unlock();
1343 }
1339 } 1344 }
1340 rtnl_unlock();
1341 1345
1342 tun = tfile->tun; 1346 tun = tfile->tun;
1343 if (tun) 1347 if (tun)
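
Editorial note: the first tun.c hunk above moves the sk = tun->sk assignment below the !tun test; initializing the local in its declaration dereferenced tun before the NULL check could run. The second hunk narrows the rtnl_lock() scope to the unregister path and double-checks the netdev registration state first. A minimal sketch of the first fix, with placeholder types standing in for struct tun_struct and struct sock:

struct sock_stub;                               /* placeholder types */
struct tun_stub { struct sock_stub *sk; };

static int poll_sketch(struct tun_stub *tun)
{
        struct sock_stub *sk;   /* declared only - no dereference yet */

        if (!tun)
                return -1;      /* the driver returns POLLERR here */

        sk = tun->sk;           /* safe: tun is known to be non-NULL */
        (void)sk;               /* ... poll_wait()/queue checks follow ... */
        return 0;
}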
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 40c6eba775ce..3b957e6412ee 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1590,13 +1590,13 @@ static int init_phy(struct net_device *dev)
1590 priv->oldspeed = 0; 1590 priv->oldspeed = 0;
1591 priv->oldduplex = -1; 1591 priv->oldduplex = -1;
1592 1592
1593 if (!ug_info->phy_node)
1594 return 0;
1595
1596 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, 1593 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
1597 priv->phy_interface); 1594 priv->phy_interface);
1595 if (!phydev)
1596 phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1597 priv->phy_interface);
1598 if (!phydev) { 1598 if (!phydev) {
1599 printk("%s: Could not attach to PHY\n", dev->name); 1599 dev_err(&dev->dev, "Could not attach to PHY\n");
1600 return -ENODEV; 1600 return -ENODEV;
1601 } 1601 }
1602 1602
@@ -3608,9 +3608,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3608 struct ucc_geth_private *ugeth = NULL; 3608 struct ucc_geth_private *ugeth = NULL;
3609 struct ucc_geth_info *ug_info; 3609 struct ucc_geth_info *ug_info;
3610 struct resource res; 3610 struct resource res;
3611 struct device_node *phy;
3612 int err, ucc_num, max_speed = 0; 3611 int err, ucc_num, max_speed = 0;
3613 const u32 *fixed_link;
3614 const unsigned int *prop; 3612 const unsigned int *prop;
3615 const char *sprop; 3613 const char *sprop;
3616 const void *mac_addr; 3614 const void *mac_addr;
@@ -3708,15 +3706,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3708 3706
3709 ug_info->uf_info.regs = res.start; 3707 ug_info->uf_info.regs = res.start;
3710 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3708 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3711 fixed_link = of_get_property(np, "fixed-link", NULL); 3709
3712 if (fixed_link) { 3710 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
3713 phy = NULL;
3714 } else {
3715 phy = of_parse_phandle(np, "phy-handle", 0);
3716 if (phy == NULL)
3717 return -ENODEV;
3718 }
3719 ug_info->phy_node = phy;
3720 3711
3721 /* Find the TBI PHY node. If it's not there, we don't support SGMII */ 3712 /* Find the TBI PHY node. If it's not there, we don't support SGMII */
3722 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 3713 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
@@ -3725,7 +3716,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3725 prop = of_get_property(np, "phy-connection-type", NULL); 3716 prop = of_get_property(np, "phy-connection-type", NULL);
3726 if (!prop) { 3717 if (!prop) {
3727 /* handle interface property present in old trees */ 3718 /* handle interface property present in old trees */
3728 prop = of_get_property(phy, "interface", NULL); 3719 prop = of_get_property(ug_info->phy_node, "interface", NULL);
3729 if (prop != NULL) { 3720 if (prop != NULL) {
3730 phy_interface = enet_to_phy_interface[*prop]; 3721 phy_interface = enet_to_phy_interface[*prop];
3731 max_speed = enet_to_speed[*prop]; 3722 max_speed = enet_to_speed[*prop];
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index a906d3998131..c47237c2d638 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -369,4 +369,12 @@ config USB_NET_INT51X1
369 (Powerline Communications) solution with an Intellon 369 (Powerline Communications) solution with an Intellon
370 INT51x1/INT5200 chip, like the "devolo dLan duo". 370 INT51x1/INT5200 chip, like the "devolo dLan duo".
371 371
372config USB_CDC_PHONET
373 tristate "CDC Phonet support"
374 depends on PHONET
375 help
376 Choose this option to support the Phonet interface to a Nokia
377 cellular modem, as found on most Nokia handsets with the
378 "PC suite" USB profile.
379
372endmenu 380endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b870b0b1cbe0..e17afb78f372 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -21,4 +21,5 @@ obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
21obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 21obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
22obj-$(CONFIG_USB_USBNET) += usbnet.o 22obj-$(CONFIG_USB_USBNET) += usbnet.o
23obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o 23obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
24obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
24 25
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
new file mode 100644
index 000000000000..792af72da8ac
--- /dev/null
+++ b/drivers/net/usb/cdc-phonet.c
@@ -0,0 +1,461 @@
1/*
2 * phonet.c -- USB CDC Phonet host driver
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation. All rights reserved.
5 *
6 * Author: Rémi Denis-Courmont
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/usb.h>
26#include <linux/usb/cdc.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_phonet.h>
30
31#define PN_MEDIA_USB 0x1B
32
33static const unsigned rxq_size = 17;
34
35struct usbpn_dev {
36 struct net_device *dev;
37
38 struct usb_interface *intf, *data_intf;
39 struct usb_device *usb;
40 unsigned int tx_pipe, rx_pipe;
41 u8 active_setting;
42 u8 disconnected;
43
44 unsigned tx_queue;
45 spinlock_t tx_lock;
46
47 spinlock_t rx_lock;
48 struct sk_buff *rx_skb;
49 struct urb *urbs[0];
50};
51
52static void tx_complete(struct urb *req);
53static void rx_complete(struct urb *req);
54
55/*
56 * Network device callbacks
57 */
58static int usbpn_xmit(struct sk_buff *skb, struct net_device *dev)
59{
60 struct usbpn_dev *pnd = netdev_priv(dev);
61 struct urb *req = NULL;
62 unsigned long flags;
63 int err;
64
65 if (skb->protocol != htons(ETH_P_PHONET))
66 goto drop;
67
68 req = usb_alloc_urb(0, GFP_ATOMIC);
69 if (!req)
70 goto drop;
71 usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len,
72 tx_complete, skb);
73 req->transfer_flags = URB_ZERO_PACKET;
74 err = usb_submit_urb(req, GFP_ATOMIC);
75 if (err) {
76 usb_free_urb(req);
77 goto drop;
78 }
79
80 spin_lock_irqsave(&pnd->tx_lock, flags);
81 pnd->tx_queue++;
82 if (pnd->tx_queue >= dev->tx_queue_len)
83 netif_stop_queue(dev);
84 spin_unlock_irqrestore(&pnd->tx_lock, flags);
85 return 0;
86
87drop:
88 dev_kfree_skb(skb);
89 dev->stats.tx_dropped++;
90 return 0;
91}
92
93static void tx_complete(struct urb *req)
94{
95 struct sk_buff *skb = req->context;
96 struct net_device *dev = skb->dev;
97 struct usbpn_dev *pnd = netdev_priv(dev);
98
99 switch (req->status) {
100 case 0:
101 dev->stats.tx_bytes += skb->len;
102 break;
103
104 case -ENOENT:
105 case -ECONNRESET:
106 case -ESHUTDOWN:
107 dev->stats.tx_aborted_errors++;
108 default:
109 dev->stats.tx_errors++;
110 dev_dbg(&dev->dev, "TX error (%d)\n", req->status);
111 }
112 dev->stats.tx_packets++;
113
114 spin_lock(&pnd->tx_lock);
115 pnd->tx_queue--;
116 netif_wake_queue(dev);
117 spin_unlock(&pnd->tx_lock);
118
119 dev_kfree_skb_any(skb);
120 usb_free_urb(req);
121}
122
123static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
124{
125 struct net_device *dev = pnd->dev;
126 struct page *page;
127 int err;
128
129 page = __netdev_alloc_page(dev, gfp_flags);
130 if (!page)
131 return -ENOMEM;
132
133 usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page),
134 PAGE_SIZE, rx_complete, dev);
135 req->transfer_flags = 0;
136 err = usb_submit_urb(req, gfp_flags);
137 if (unlikely(err)) {
138 dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
139 netdev_free_page(dev, page);
140 }
141 return err;
142}
143
144static void rx_complete(struct urb *req)
145{
146 struct net_device *dev = req->context;
147 struct usbpn_dev *pnd = netdev_priv(dev);
148 struct page *page = virt_to_page(req->transfer_buffer);
149 struct sk_buff *skb;
150 unsigned long flags;
151
152 switch (req->status) {
153 case 0:
154 spin_lock_irqsave(&pnd->rx_lock, flags);
155 skb = pnd->rx_skb;
156 if (!skb) {
157 skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
158 if (likely(skb)) {
159 /* Can't use pskb_pull() on page in IRQ */
160 memcpy(skb_put(skb, 1), page_address(page), 1);
161 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
162 page, 1, req->actual_length);
163 page = NULL;
164 }
165 } else {
166 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
167 page, 0, req->actual_length);
168 page = NULL;
169 }
170 if (req->actual_length < PAGE_SIZE)
171 pnd->rx_skb = NULL; /* Last fragment */
172 else
173 skb = NULL;
174 spin_unlock_irqrestore(&pnd->rx_lock, flags);
175 if (skb) {
176 skb->protocol = htons(ETH_P_PHONET);
177 skb_reset_mac_header(skb);
178 __skb_pull(skb, 1);
179 skb->dev = dev;
180 dev->stats.rx_packets++;
181 dev->stats.rx_bytes += skb->len;
182
183 netif_rx(skb);
184 }
185 goto resubmit;
186
187 case -ENOENT:
188 case -ECONNRESET:
189 case -ESHUTDOWN:
190 req = NULL;
191 break;
192
193 case -EOVERFLOW:
194 dev->stats.rx_over_errors++;
195 dev_dbg(&dev->dev, "RX overflow\n");
196 break;
197
198 case -EILSEQ:
199 dev->stats.rx_crc_errors++;
200 break;
201 }
202
203 dev->stats.rx_errors++;
204resubmit:
205 if (page)
206 netdev_free_page(dev, page);
207 if (req)
208 rx_submit(pnd, req, GFP_ATOMIC);
209}
210
211static int usbpn_close(struct net_device *dev);
212
213static int usbpn_open(struct net_device *dev)
214{
215 struct usbpn_dev *pnd = netdev_priv(dev);
216 int err;
217 unsigned i;
218 unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;
219
220 err = usb_set_interface(pnd->usb, num, pnd->active_setting);
221 if (err)
222 return err;
223
224 for (i = 0; i < rxq_size; i++) {
225 struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
226
227 if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
228 usbpn_close(dev);
229 return -ENOMEM;
230 }
231 pnd->urbs[i] = req;
232 }
233
234 netif_wake_queue(dev);
235 return 0;
236}
237
238static int usbpn_close(struct net_device *dev)
239{
240 struct usbpn_dev *pnd = netdev_priv(dev);
241 unsigned i;
242 unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;
243
244 netif_stop_queue(dev);
245
246 for (i = 0; i < rxq_size; i++) {
247 struct urb *req = pnd->urbs[i];
248
249 if (!req)
250 continue;
251 usb_kill_urb(req);
252 usb_free_urb(req);
253 pnd->urbs[i] = NULL;
254 }
255
256 return usb_set_interface(pnd->usb, num, !pnd->active_setting);
257}
258
259static int usbpn_set_mtu(struct net_device *dev, int new_mtu)
260{
261 if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
262 return -EINVAL;
263
264 dev->mtu = new_mtu;
265 return 0;
266}
267
268static const struct net_device_ops usbpn_ops = {
269 .ndo_open = usbpn_open,
270 .ndo_stop = usbpn_close,
271 .ndo_start_xmit = usbpn_xmit,
272 .ndo_change_mtu = usbpn_set_mtu,
273};
274
275static void usbpn_setup(struct net_device *dev)
276{
277 dev->features = 0;
278 dev->netdev_ops = &usbpn_ops,
279 dev->header_ops = &phonet_header_ops;
280 dev->type = ARPHRD_PHONET;
281 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
282 dev->mtu = PHONET_MAX_MTU;
283 dev->hard_header_len = 1;
284 dev->dev_addr[0] = PN_MEDIA_USB;
285 dev->addr_len = 1;
286 dev->tx_queue_len = 3;
287
288 dev->destructor = free_netdev;
289}
290
291/*
292 * USB driver callbacks
293 */
294static struct usb_device_id usbpn_ids[] = {
295 {
296 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
297 | USB_DEVICE_ID_MATCH_INT_CLASS
298 | USB_DEVICE_ID_MATCH_INT_SUBCLASS,
299 .idVendor = 0x0421, /* Nokia */
300 .bInterfaceClass = USB_CLASS_COMM,
301 .bInterfaceSubClass = 0xFE,
302 },
303 { },
304};
305
306MODULE_DEVICE_TABLE(usb, usbpn_ids);
307
308static struct usb_driver usbpn_driver;
309
310int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
311{
312 static const char ifname[] = "usbpn%d";
313 const struct usb_cdc_union_desc *union_header = NULL;
314 const struct usb_cdc_header_desc *phonet_header = NULL;
315 const struct usb_host_interface *data_desc;
316 struct usb_interface *data_intf;
317 struct usb_device *usbdev = interface_to_usbdev(intf);
318 struct net_device *dev;
319 struct usbpn_dev *pnd;
320 u8 *data;
321 int len, err;
322
323 data = intf->altsetting->extra;
324 len = intf->altsetting->extralen;
325 while (len >= 3) {
326 u8 dlen = data[0];
327 if (dlen < 3)
328 return -EINVAL;
329
330 /* bDescriptorType */
331 if (data[1] == USB_DT_CS_INTERFACE) {
332 /* bDescriptorSubType */
333 switch (data[2]) {
334 case USB_CDC_UNION_TYPE:
335 if (union_header || dlen < 5)
336 break;
337 union_header =
338 (struct usb_cdc_union_desc *)data;
339 break;
340 case 0xAB:
341 if (phonet_header || dlen < 5)
342 break;
343 phonet_header =
344 (struct usb_cdc_header_desc *)data;
345 break;
346 }
347 }
348 data += dlen;
349 len -= dlen;
350 }
351
352 if (!union_header || !phonet_header)
353 return -EINVAL;
354
355 data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
356 if (data_intf == NULL)
357 return -ENODEV;
358 /* Data interface has one inactive and one active setting */
359 if (data_intf->num_altsetting != 2)
360 return -EINVAL;
361 if (data_intf->altsetting[0].desc.bNumEndpoints == 0
362 && data_intf->altsetting[1].desc.bNumEndpoints == 2)
363 data_desc = data_intf->altsetting + 1;
364 else
365 if (data_intf->altsetting[0].desc.bNumEndpoints == 2
366 && data_intf->altsetting[1].desc.bNumEndpoints == 0)
367 data_desc = data_intf->altsetting;
368 else
369 return -EINVAL;
370
371 dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size,
372 ifname, usbpn_setup);
373 if (!dev)
374 return -ENOMEM;
375
376 pnd = netdev_priv(dev);
377 SET_NETDEV_DEV(dev, &intf->dev);
378 netif_stop_queue(dev);
379
380 pnd->dev = dev;
381 pnd->usb = usb_get_dev(usbdev);
382 pnd->intf = intf;
383 pnd->data_intf = data_intf;
384 spin_lock_init(&pnd->tx_lock);
385 spin_lock_init(&pnd->rx_lock);
386 /* Endpoints */
387 if (usb_pipein(data_desc->endpoint[0].desc.bEndpointAddress)) {
388 pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
389 data_desc->endpoint[0].desc.bEndpointAddress);
390 pnd->tx_pipe = usb_sndbulkpipe(usbdev,
391 data_desc->endpoint[1].desc.bEndpointAddress);
392 } else {
393 pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
394 data_desc->endpoint[1].desc.bEndpointAddress);
395 pnd->tx_pipe = usb_sndbulkpipe(usbdev,
396 data_desc->endpoint[0].desc.bEndpointAddress);
397 }
398 pnd->active_setting = data_desc - data_intf->altsetting;
399
400 err = usb_driver_claim_interface(&usbpn_driver, data_intf, pnd);
401 if (err)
402 goto out;
403
404 /* Force inactive mode until the network device is brought UP */
405 usb_set_interface(usbdev, union_header->bSlaveInterface0,
406 !pnd->active_setting);
407 usb_set_intfdata(intf, pnd);
408
409 err = register_netdev(dev);
410 if (err) {
411 usb_driver_release_interface(&usbpn_driver, data_intf);
412 goto out;
413 }
414
415 dev_dbg(&dev->dev, "USB CDC Phonet device found\n");
416 return 0;
417
418out:
419 usb_set_intfdata(intf, NULL);
420 free_netdev(dev);
421 return err;
422}
423
424static void usbpn_disconnect(struct usb_interface *intf)
425{
426 struct usbpn_dev *pnd = usb_get_intfdata(intf);
427 struct usb_device *usb = pnd->usb;
428
429 if (pnd->disconnected)
430 return;
431
432 pnd->disconnected = 1;
433 usb_driver_release_interface(&usbpn_driver,
434 (pnd->intf == intf) ? pnd->data_intf : pnd->intf);
435 unregister_netdev(pnd->dev);
436 usb_put_dev(usb);
437}
438
439static struct usb_driver usbpn_driver = {
440 .name = "cdc_phonet",
441 .probe = usbpn_probe,
442 .disconnect = usbpn_disconnect,
443 .id_table = usbpn_ids,
444};
445
446static int __init usbpn_init(void)
447{
448 return usb_register(&usbpn_driver);
449}
450
451static void __exit usbpn_exit(void)
452{
453 usb_deregister(&usbpn_driver);
454}
455
456module_init(usbpn_init);
457module_exit(usbpn_exit);
458
459MODULE_AUTHOR("Remi Denis-Courmont");
460MODULE_DESCRIPTION("USB CDC Phonet host interface");
461MODULE_LICENSE("GPL");
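
The receive path in the new driver above reassembles one Phonet packet from a series of page-sized bulk transfers: the first byte of the first transfer is copied into the skb's linear area, the rest of each transfer is attached as a page fragment, and a transfer shorter than PAGE_SIZE ends the packet. The following user-space sketch models only that termination rule; the fragment lengths and names are invented for illustration and are not taken from the patch.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SZ 4096	/* stands in for the kernel's PAGE_SIZE */

int main(void)
{
	/* Lengths of successive bulk transfers as rx_complete() would see them
	 * in req->actual_length; these particular values are made up. */
	size_t frag_len[] = { PAGE_SZ, PAGE_SZ, 1234, 80 };
	size_t packet_len = 0;

	for (size_t i = 0; i < sizeof(frag_len) / sizeof(frag_len[0]); i++) {
		packet_len += frag_len[i];
		if (frag_len[i] < PAGE_SZ) {
			/* Short transfer: this fragment ends the packet. */
			printf("packet complete, %zu bytes\n", packet_len);
			packet_len = 0;
		}
		/* A full-size transfer leaves the packet open for more data. */
	}
	return 0;
}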
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 80e01778dd3b..45cebfb302cf 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -311,7 +311,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		 * bmCRC = 0 : CRC = 0xDEADBEEF
 		 */
 		if (header & BIT(14))
-			crc2 = ~crc32_le(~0, skb2->data, len);
+			crc2 = ~crc32_le(~0, skb2->data, skb2->len);
 		else
 			crc2 = 0xdeadbeef;
 
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 			return crc == crc2;
 
 		if (unlikely(crc != crc2)) {
-			dev->stats.rx_errors++;
+			dev->net->stats.rx_errors++;
 			dev_kfree_skb_any(skb2);
 		} else
 			usbnet_skb_return(dev, skb2);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 7ae82446b93a..1d3730d6690f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	len = (skb->data[1] | (skb->data[2] << 8)) - 4;
 
 	if (unlikely(status & 0xbf)) {
-		if (status & 0x01) dev->stats.rx_fifo_errors++;
-		if (status & 0x02) dev->stats.rx_crc_errors++;
-		if (status & 0x04) dev->stats.rx_frame_errors++;
-		if (status & 0x20) dev->stats.rx_missed_errors++;
-		if (status & 0x90) dev->stats.rx_length_errors++;
+		if (status & 0x01) dev->net->stats.rx_fifo_errors++;
+		if (status & 0x02) dev->net->stats.rx_crc_errors++;
+		if (status & 0x04) dev->net->stats.rx_frame_errors++;
+		if (status & 0x20) dev->net->stats.rx_missed_errors++;
+		if (status & 0x90) dev->net->stats.rx_length_errors++;
 		return 0;
 	}
 
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e01314789718..1f9ec29fce50 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -999,6 +999,9 @@ static const struct net_device_ops kaweth_netdev_ops = {
999 .ndo_tx_timeout = kaweth_tx_timeout, 999 .ndo_tx_timeout = kaweth_tx_timeout,
1000 .ndo_set_multicast_list = kaweth_set_rx_mode, 1000 .ndo_set_multicast_list = kaweth_set_rx_mode,
1001 .ndo_get_stats = kaweth_netdev_stats, 1001 .ndo_get_stats = kaweth_netdev_stats,
1002 .ndo_change_mtu = eth_change_mtu,
1003 .ndo_set_mac_address = eth_mac_addr,
1004 .ndo_validate_addr = eth_validate_addr,
1002}; 1005};
1003 1006
1004static int kaweth_probe( 1007static int kaweth_probe(
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 034e8a73ca6b..aeb1ab03a9ee 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
433 dbg("rx framesize %d range %d..%d mtu %d", skb->len, 433 dbg("rx framesize %d range %d..%d mtu %d", skb->len,
434 net->hard_header_len, dev->hard_mtu, net->mtu); 434 net->hard_header_len, dev->hard_mtu, net->mtu);
435#endif 435#endif
436 dev->stats.rx_frame_errors++; 436 dev->net->stats.rx_frame_errors++;
437 nc_ensure_sync(dev); 437 nc_ensure_sync(dev);
438 return 0; 438 return 0;
439 } 439 }
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
442 hdr_len = le16_to_cpup(&header->hdr_len); 442 hdr_len = le16_to_cpup(&header->hdr_len);
443 packet_len = le16_to_cpup(&header->packet_len); 443 packet_len = le16_to_cpup(&header->packet_len);
444 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { 444 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
445 dev->stats.rx_frame_errors++; 445 dev->net->stats.rx_frame_errors++;
446 dbg("packet too big, %d", packet_len); 446 dbg("packet too big, %d", packet_len);
447 nc_ensure_sync(dev); 447 nc_ensure_sync(dev);
448 return 0; 448 return 0;
449 } else if (hdr_len < MIN_HEADER) { 449 } else if (hdr_len < MIN_HEADER) {
450 dev->stats.rx_frame_errors++; 450 dev->net->stats.rx_frame_errors++;
451 dbg("header too short, %d", hdr_len); 451 dbg("header too short, %d", hdr_len);
452 nc_ensure_sync(dev); 452 nc_ensure_sync(dev);
453 return 0; 453 return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
465 465
466 if ((packet_len & 0x01) == 0) { 466 if ((packet_len & 0x01) == 0) {
467 if (skb->data [packet_len] != PAD_BYTE) { 467 if (skb->data [packet_len] != PAD_BYTE) {
468 dev->stats.rx_frame_errors++; 468 dev->net->stats.rx_frame_errors++;
469 dbg("bad pad"); 469 dbg("bad pad");
470 return 0; 470 return 0;
471 } 471 }
472 skb_trim(skb, skb->len - 1); 472 skb_trim(skb, skb->len - 1);
473 } 473 }
474 if (skb->len != packet_len) { 474 if (skb->len != packet_len) {
475 dev->stats.rx_frame_errors++; 475 dev->net->stats.rx_frame_errors++;
476 dbg("bad packet len %d (expected %d)", 476 dbg("bad packet len %d (expected %d)",
477 skb->len, packet_len); 477 skb->len, packet_len);
478 nc_ensure_sync(dev); 478 nc_ensure_sync(dev);
479 return 0; 479 return 0;
480 } 480 }
481 if (header->packet_id != get_unaligned(&trailer->packet_id)) { 481 if (header->packet_id != get_unaligned(&trailer->packet_id)) {
482 dev->stats.rx_fifo_errors++; 482 dev->net->stats.rx_fifo_errors++;
483 dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", 483 dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
484 le16_to_cpu(header->packet_id), 484 le16_to_cpu(header->packet_id),
485 le16_to_cpu(trailer->packet_id)); 485 le16_to_cpu(trailer->packet_id));
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 73acbd244aa1..631d269ac980 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1493,6 +1493,9 @@ static const struct net_device_ops pegasus_netdev_ops = {
1493 .ndo_set_multicast_list = pegasus_set_multicast, 1493 .ndo_set_multicast_list = pegasus_set_multicast,
1494 .ndo_get_stats = pegasus_netdev_stats, 1494 .ndo_get_stats = pegasus_netdev_stats,
1495 .ndo_tx_timeout = pegasus_tx_timeout, 1495 .ndo_tx_timeout = pegasus_tx_timeout,
1496 .ndo_change_mtu = eth_change_mtu,
1497 .ndo_set_mac_address = eth_mac_addr,
1498 .ndo_validate_addr = eth_validate_addr,
1496}; 1499};
1497 1500
1498static struct usb_driver pegasus_driver = { 1501static struct usb_driver pegasus_driver = {
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1bf243ef950e..2232232b7989 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
 			|| skb->len < msg_len
 			|| (data_offset + data_len + 8) > msg_len)) {
-		dev->stats.rx_frame_errors++;
+		dev->net->stats.rx_frame_errors++;
 		devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
 			le32_to_cpu(hdr->msg_type),
 			msg_len, data_offset, data_len, skb->len);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 89a91f8c22de..fe045896406b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		if (unlikely(header & RX_STS_ES_)) {
 			if (netif_msg_rx_err(dev))
 				devdbg(dev, "Error header=0x%08x", header);
-			dev->stats.rx_errors++;
-			dev->stats.rx_dropped++;
+			dev->net->stats.rx_errors++;
+			dev->net->stats.rx_dropped++;
 
 			if (header & RX_STS_CRC_) {
-				dev->stats.rx_crc_errors++;
+				dev->net->stats.rx_crc_errors++;
 			} else {
 				if (header & (RX_STS_TL_ | RX_STS_RF_))
-					dev->stats.rx_frame_errors++;
+					dev->net->stats.rx_frame_errors++;
 
 				if ((header & RX_STS_LE_) &&
 					(!(header & RX_STS_FT_)))
-					dev->stats.rx_length_errors++;
+					dev->net->stats.rx_length_errors++;
 			}
 		} else {
 			/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 22c0585a0319..edfd9e10ceba 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
234 int status; 234 int status;
235 235
236 skb->protocol = eth_type_trans (skb, dev->net); 236 skb->protocol = eth_type_trans (skb, dev->net);
237 dev->stats.rx_packets++; 237 dev->net->stats.rx_packets++;
238 dev->stats.rx_bytes += skb->len; 238 dev->net->stats.rx_bytes += skb->len;
239 239
240 if (netif_msg_rx_status (dev)) 240 if (netif_msg_rx_status (dev))
241 devdbg (dev, "< rx, len %zu, type 0x%x", 241 devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
397 if (netif_msg_rx_err (dev)) 397 if (netif_msg_rx_err (dev))
398 devdbg (dev, "drop"); 398 devdbg (dev, "drop");
399error: 399error:
400 dev->stats.rx_errors++; 400 dev->net->stats.rx_errors++;
401 skb_queue_tail (&dev->done, skb); 401 skb_queue_tail (&dev->done, skb);
402 } 402 }
403} 403}
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
420 case 0: 420 case 0:
421 if (skb->len < dev->net->hard_header_len) { 421 if (skb->len < dev->net->hard_header_len) {
422 entry->state = rx_cleanup; 422 entry->state = rx_cleanup;
423 dev->stats.rx_errors++; 423 dev->net->stats.rx_errors++;
424 dev->stats.rx_length_errors++; 424 dev->net->stats.rx_length_errors++;
425 if (netif_msg_rx_err (dev)) 425 if (netif_msg_rx_err (dev))
426 devdbg (dev, "rx length %d", skb->len); 426 devdbg (dev, "rx length %d", skb->len);
427 } 427 }
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
433 * storm, recovering as needed. 433 * storm, recovering as needed.
434 */ 434 */
435 case -EPIPE: 435 case -EPIPE:
436 dev->stats.rx_errors++; 436 dev->net->stats.rx_errors++;
437 usbnet_defer_kevent (dev, EVENT_RX_HALT); 437 usbnet_defer_kevent (dev, EVENT_RX_HALT);
438 // FALLTHROUGH 438 // FALLTHROUGH
439 439
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
451 case -EPROTO: 451 case -EPROTO:
452 case -ETIME: 452 case -ETIME:
453 case -EILSEQ: 453 case -EILSEQ:
454 dev->stats.rx_errors++; 454 dev->net->stats.rx_errors++;
455 if (!timer_pending (&dev->delay)) { 455 if (!timer_pending (&dev->delay)) {
456 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); 456 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
457 if (netif_msg_link (dev)) 457 if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
465 465
466 /* data overrun ... flush fifo? */ 466 /* data overrun ... flush fifo? */
467 case -EOVERFLOW: 467 case -EOVERFLOW:
468 dev->stats.rx_over_errors++; 468 dev->net->stats.rx_over_errors++;
469 // FALLTHROUGH 469 // FALLTHROUGH
470 470
471 default: 471 default:
472 entry->state = rx_cleanup; 472 entry->state = rx_cleanup;
473 dev->stats.rx_errors++; 473 dev->net->stats.rx_errors++;
474 if (netif_msg_rx_err (dev)) 474 if (netif_msg_rx_err (dev))
475 devdbg (dev, "rx status %d", urb_status); 475 devdbg (dev, "rx status %d", urb_status);
476 break; 476 break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
583 583
584 if (netif_msg_ifdown (dev)) 584 if (netif_msg_ifdown (dev))
585 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", 585 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
586 dev->stats.rx_packets, dev->stats.tx_packets, 586 net->stats.rx_packets, net->stats.tx_packets,
587 dev->stats.rx_errors, dev->stats.tx_errors 587 net->stats.rx_errors, net->stats.tx_errors
588 ); 588 );
589 589
590 // ensure there are no more active urbs 590 // ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
891 struct usbnet *dev = entry->dev; 891 struct usbnet *dev = entry->dev;
892 892
893 if (urb->status == 0) { 893 if (urb->status == 0) {
894 dev->stats.tx_packets++; 894 dev->net->stats.tx_packets++;
895 dev->stats.tx_bytes += entry->length; 895 dev->net->stats.tx_bytes += entry->length;
896 } else { 896 } else {
897 dev->stats.tx_errors++; 897 dev->net->stats.tx_errors++;
898 898
899 switch (urb->status) { 899 switch (urb->status) {
900 case -EPIPE: 900 case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
1020 devdbg (dev, "drop, code %d", retval); 1020 devdbg (dev, "drop, code %d", retval);
1021drop: 1021drop:
1022 retval = NET_XMIT_SUCCESS; 1022 retval = NET_XMIT_SUCCESS;
1023 dev->stats.tx_dropped++; 1023 dev->net->stats.tx_dropped++;
1024 if (skb) 1024 if (skb)
1025 dev_kfree_skb_any (skb); 1025 dev_kfree_skb_any (skb);
1026 usb_free_urb (urb); 1026 usb_free_urb (urb);
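
The usbnet hunks above, together with the matching ones in cdc_eem, dm9601, net1080, rndis_host and smsc95xx, apply one pattern: the sub-drivers stop updating the usbnet-private copy of the interface counters and bump the counters embedded in struct net_device instead, so a single set of statistics is kept. A rough user-space model of the two counter locations follows; the structures are cut down to the fields needed for the illustration and are not the kernel's definitions.

#include <stdio.h>

struct net_device_stats { unsigned long rx_errors; };
struct net_device { struct net_device_stats stats; };	/* the counters the stack reports */
struct usbnet {
	struct net_device_stats stats;			/* old, usbnet-private copy */
	struct net_device *net;
};

int main(void)
{
	struct net_device ndev = { { 0 } };
	struct usbnet dev = { { 0 }, &ndev };

	dev.stats.rx_errors++;		/* old style: a separate counter */
	dev.net->stats.rx_errors++;	/* patched style: the counter that is reported */

	printf("reported rx_errors = %lu\n", ndev.stats.rx_errors);
	return 0;
}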
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 87197dd9c788..1097c72e44d5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -208,11 +208,14 @@ rx_drop:
208 208
209static struct net_device_stats *veth_get_stats(struct net_device *dev) 209static struct net_device_stats *veth_get_stats(struct net_device *dev)
210{ 210{
211 struct veth_priv *priv = netdev_priv(dev); 211 struct veth_priv *priv;
212 struct net_device_stats *dev_stats = &dev->stats; 212 struct net_device_stats *dev_stats;
213 unsigned int cpu; 213 int cpu;
214 struct veth_net_stats *stats; 214 struct veth_net_stats *stats;
215 215
216 priv = netdev_priv(dev);
217 dev_stats = &dev->stats;
218
216 dev_stats->rx_packets = 0; 219 dev_stats->rx_packets = 0;
217 dev_stats->tx_packets = 0; 220 dev_stats->tx_packets = 0;
218 dev_stats->rx_bytes = 0; 221 dev_stats->rx_bytes = 0;
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
220 dev_stats->tx_dropped = 0; 223 dev_stats->tx_dropped = 0;
221 dev_stats->rx_dropped = 0; 224 dev_stats->rx_dropped = 0;
222 225
223 if (priv->stats) 226 for_each_online_cpu(cpu) {
224 for_each_online_cpu(cpu) { 227 stats = per_cpu_ptr(priv->stats, cpu);
225 stats = per_cpu_ptr(priv->stats, cpu);
226 228
227 dev_stats->rx_packets += stats->rx_packets; 229 dev_stats->rx_packets += stats->rx_packets;
228 dev_stats->tx_packets += stats->tx_packets; 230 dev_stats->tx_packets += stats->tx_packets;
229 dev_stats->rx_bytes += stats->rx_bytes; 231 dev_stats->rx_bytes += stats->rx_bytes;
230 dev_stats->tx_bytes += stats->tx_bytes; 232 dev_stats->tx_bytes += stats->tx_bytes;
231 dev_stats->tx_dropped += stats->tx_dropped; 233 dev_stats->tx_dropped += stats->tx_dropped;
232 dev_stats->rx_dropped += stats->rx_dropped; 234 dev_stats->rx_dropped += stats->rx_dropped;
233 } 235 }
234 236
235 return dev_stats; 237 return dev_stats;
236} 238}
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev)
257 netif_carrier_off(dev); 259 netif_carrier_off(dev);
258 netif_carrier_off(priv->peer); 260 netif_carrier_off(priv->peer);
259 261
260 free_percpu(priv->stats);
261 priv->stats = NULL;
262 return 0; 262 return 0;
263} 263}
264 264
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev)
289 return 0; 289 return 0;
290} 290}
291 291
292static void veth_dev_free(struct net_device *dev)
293{
294 struct veth_priv *priv;
295
296 priv = netdev_priv(dev);
297 free_percpu(priv->stats);
298 free_netdev(dev);
299}
300
292static const struct net_device_ops veth_netdev_ops = { 301static const struct net_device_ops veth_netdev_ops = {
293 .ndo_init = veth_dev_init, 302 .ndo_init = veth_dev_init,
294 .ndo_open = veth_open, 303 .ndo_open = veth_open,
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev)
306 dev->netdev_ops = &veth_netdev_ops; 315 dev->netdev_ops = &veth_netdev_ops;
307 dev->ethtool_ops = &veth_ethtool_ops; 316 dev->ethtool_ops = &veth_ethtool_ops;
308 dev->features |= NETIF_F_LLTX; 317 dev->features |= NETIF_F_LLTX;
309 dev->destructor = free_netdev; 318 dev->destructor = veth_dev_free;
310} 319}
311 320
312/* 321/*
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index d3489a3c4c03..88c30a58b4bd 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -621,6 +621,7 @@ static const struct net_device_ops rhine_netdev_ops = {
621 .ndo_start_xmit = rhine_start_tx, 621 .ndo_start_xmit = rhine_start_tx,
622 .ndo_get_stats = rhine_get_stats, 622 .ndo_get_stats = rhine_get_stats,
623 .ndo_set_multicast_list = rhine_set_rx_mode, 623 .ndo_set_multicast_list = rhine_set_rx_mode,
624 .ndo_change_mtu = eth_change_mtu,
624 .ndo_validate_addr = eth_validate_addr, 625 .ndo_validate_addr = eth_validate_addr,
625 .ndo_set_mac_address = eth_mac_addr, 626 .ndo_set_mac_address = eth_mac_addr,
626 .ndo_do_ioctl = netdev_ioctl, 627 .ndo_do_ioctl = netdev_ioctl,
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 223238de475c..1ea1ef6c3b96 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -584,8 +584,9 @@ static void sca_dump_rings(struct net_device *dev)
 	       sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
 	for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
 		printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+	printk(KERN_CONT "\n");
 
-	printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+	printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
 	       "last=%u %sactive",
 	       sca_inw(get_dmac_tx(port) + CDAL, card),
 	       sca_inw(get_dmac_tx(port) + EDAL, card),
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 497b003d7239..f099c34a3ae2 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -529,8 +529,9 @@ static void sca_dump_rings(struct net_device *dev)
 	       sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
 	for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
 		printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+	printk(KERN_CONT "\n");
 
-	printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+	printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
 	       "last=%u %sactive",
 	       sca_inl(get_dmac_tx(port) + CDAL, card),
 	       sca_inl(get_dmac_tx(port) + EDAL, card),
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 3fb9dbc88a1a..d14e95a08d66 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -326,11 +326,9 @@ sbni_pci_probe( struct net_device *dev )
 	}
 
 	if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
-		printk( KERN_WARNING " WARNING: The PCI BIOS assigned "
-			"this PCI card to IRQ %d, which is unlikely "
-			"to work!.\n"
-			KERN_WARNING " You should use the PCI BIOS "
-			"setup to assign a valid IRQ line.\n",
+		printk( KERN_WARNING
+			" WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n"
+			" You should use the PCI BIOS setup to assign a valid IRQ line.\n",
 			pci_irq_line );
 
 	/* avoiding re-enable dual adapters */
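
The hd64570, hd64572 and sbni hunks above all touch the same printk rule: a KERN_* level is meant to prefix a whole message, so the patches split the multi-line calls, give each message its own level, and mark continuations with KERN_CONT rather than embedding a second level marker inside one format string. A small user-space sketch of the resulting pattern; the macro definitions below are simplified stand-ins so the sketch compiles outside the kernel.

#include <stdio.h>

#define KERN_DEBUG "<7>"	/* simplified stand-ins, not the kernel's headers */
#define KERN_CONT  ""
#define printk printf

int main(void)
{
	unsigned char stat[] = { 0x10, 0x20, 0x30 };

	printk(KERN_DEBUG "RX ring:");
	for (unsigned i = 0; i < sizeof(stat); i++)
		printk(KERN_CONT " %02X", stat[i]);
	printk(KERN_CONT "\n");		/* the continuation the patches add */
	return 0;
}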
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index c70604f0329e..8ce5e4cee168 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5918,20 +5918,19 @@ static int airo_set_essid(struct net_device *dev,
 	readSsidRid(local, &SSID_rid);
 
 	/* Check if we asked for `any' */
-	if(dwrq->flags == 0) {
+	if (dwrq->flags == 0) {
 		/* Just send an empty SSID list */
 		memset(&SSID_rid, 0, sizeof(SSID_rid));
 	} else {
-		int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+		unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
 
 		/* Check the size of the string */
-		if(dwrq->length > IW_ESSID_MAX_SIZE) {
+		if (dwrq->length > IW_ESSID_MAX_SIZE)
 			return -E2BIG ;
-		}
+
 		/* Check if index is valid */
-		if((index < 0) || (index >= 4)) {
+		if (index >= ARRAY_SIZE(SSID_rid.ssids))
 			return -EINVAL;
-		}
 
 		/* Set the SSID */
 		memset(SSID_rid.ssids[index].ssid, 0,
@@ -6819,7 +6818,7 @@ static int airo_set_txpow(struct net_device *dev,
 		return -EINVAL;
 	}
 	clear_bit (FLAG_RADIO_OFF, &local->flags);
-	for (i = 0; cap_rid.txPowerLevels[i] && (i < 8); i++)
+	for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++)
 		if (v == cap_rid.txPowerLevels[i]) {
 			readConfigRid(local, 1);
 			local->config.txPower = v;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d26e7b485315..eb0337c49546 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,5 +1,6 @@
1config ATH_COMMON 1config ATH_COMMON
2 tristate "Atheros Wireless Cards" 2 tristate "Atheros Wireless Cards"
3 depends on WLAN_80211
3 depends on ATH5K || ATH9K || AR9170_USB 4 depends on ATH5K || ATH9K || AR9170_USB
4 5
5source "drivers/net/wireless/ath/ath5k/Kconfig" 6source "drivers/net/wireless/ath/ath5k/Kconfig"
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ea045151f953..029c1bc7468f 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2970,6 +2970,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2970 if (modparam_nohwcrypt) 2970 if (modparam_nohwcrypt)
2971 return -EOPNOTSUPP; 2971 return -EOPNOTSUPP;
2972 2972
2973 if (sc->opmode == NL80211_IFTYPE_AP)
2974 return -EOPNOTSUPP;
2975
2973 switch (key->alg) { 2976 switch (key->alg) {
2974 case ALG_WEP: 2977 case ALG_WEP:
2975 case ALG_TKIP: 2978 case ALG_TKIP:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 1aeafb511ddd..aad259b4c197 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -478,6 +478,18 @@ void ath9k_ani_reset(struct ath_hw *ah)
478 "Reset ANI state opmode %u\n", ah->opmode); 478 "Reset ANI state opmode %u\n", ah->opmode);
479 ah->stats.ast_ani_reset++; 479 ah->stats.ast_ani_reset++;
480 480
481 if (ah->opmode == NL80211_IFTYPE_AP) {
482 /*
483 * ath9k_hw_ani_control() will only process items set on
484 * ah->ani_function
485 */
486 if (IS_CHAN_2GHZ(chan))
487 ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
488 ATH9K_ANI_FIRSTEP_LEVEL);
489 else
490 ah->ani_function = 0;
491 }
492
481 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); 493 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
482 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); 494 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
483 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0); 495 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index a2fda702b620..ce0e86c36a82 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -460,7 +460,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
460 integer = swab32(eep->modalHeader.antCtrlCommon); 460 integer = swab32(eep->modalHeader.antCtrlCommon);
461 eep->modalHeader.antCtrlCommon = integer; 461 eep->modalHeader.antCtrlCommon = integer;
462 462
463 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 463 for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
464 integer = swab32(eep->modalHeader.antCtrlChain[i]); 464 integer = swab32(eep->modalHeader.antCtrlChain[i]);
465 eep->modalHeader.antCtrlChain[i] = integer; 465 eep->modalHeader.antCtrlChain[i] = integer;
466 } 466 }
@@ -914,7 +914,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
914 ctlMode, numCtlModes, isHt40CtlMode, 914 ctlMode, numCtlModes, isHt40CtlMode,
915 (pCtlMode[ctlMode] & EXT_ADDITIVE)); 915 (pCtlMode[ctlMode] & EXT_ADDITIVE));
916 916
917 for (i = 0; (i < AR5416_NUM_CTLS) && 917 for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
918 pEepData->ctlIndex[i]; i++) { 918 pEepData->ctlIndex[i]; i++) {
919 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 919 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
920 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " 920 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b61a071788a5..4ccf48e396df 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -355,7 +355,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
355 } 355 }
356 356
357 if (bf_next == NULL) { 357 if (bf_next == NULL) {
358 INIT_LIST_HEAD(&bf_head); 358 /*
359 * Make sure the last desc is reclaimed if it
360 * not a holding desc.
361 */
362 if (!bf_last->bf_stale)
363 list_move_tail(&bf->list, &bf_head);
364 else
365 INIT_LIST_HEAD(&bf_head);
359 } else { 366 } else {
360 ASSERT(!list_empty(bf_q)); 367 ASSERT(!list_empty(bf_q));
361 list_move_tail(&bf->list, &bf_head); 368 list_move_tail(&bf->list, &bf_head);
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index eef370bd1211..bf3d25ba7be1 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -474,6 +474,21 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
474 return 0; 474 return 0;
475} 475}
476 476
477/*
478 * Some users have reported their EEPROM programmed with
479 * 0x8000 set, this is not a supported regulatory domain
480 * but since we have more than one user with it we need
481 * a solution for them. We default to 0x64, which is the
482 * default Atheros world regulatory domain.
483 */
484static void ath_regd_sanitize(struct ath_regulatory *reg)
485{
486 if (reg->current_rd != COUNTRY_ERD_FLAG)
487 return;
488 printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
489 reg->current_rd = 0x64;
490}
491
477int 492int
478ath_regd_init(struct ath_regulatory *reg, 493ath_regd_init(struct ath_regulatory *reg,
479 struct wiphy *wiphy, 494 struct wiphy *wiphy,
@@ -486,6 +501,8 @@ ath_regd_init(struct ath_regulatory *reg,
486 if (!reg) 501 if (!reg)
487 return -EINVAL; 502 return -EINVAL;
488 503
504 ath_regd_sanitize(reg);
505
489 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); 506 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
490 507
491 if (!ath_regd_is_eeprom_valid(reg)) { 508 if (!ath_regd_is_eeprom_valid(reg)) {
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index f580c2812d91..40448067e4cc 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -648,6 +648,7 @@ struct b43_wl {
648 u8 nr_devs; 648 u8 nr_devs;
649 649
650 bool radiotap_enabled; 650 bool radiotap_enabled;
651 bool radio_enabled;
651 652
652 /* The beacon we are currently using (AP or IBSS mode). 653 /* The beacon we are currently using (AP or IBSS mode).
653 * This beacon stuff is protected by the irq_lock. */ 654 * This beacon stuff is protected by the irq_lock. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6456afebdba1..e71c8d9cd706 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3497,8 +3497,8 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3497 if (phy->ops->set_rx_antenna) 3497 if (phy->ops->set_rx_antenna)
3498 phy->ops->set_rx_antenna(dev, antenna); 3498 phy->ops->set_rx_antenna(dev, antenna);
3499 3499
3500 if (!!conf->radio_enabled != phy->radio_on) { 3500 if (wl->radio_enabled != phy->radio_on) {
3501 if (conf->radio_enabled) { 3501 if (wl->radio_enabled) {
3502 b43_software_rfkill(dev, false); 3502 b43_software_rfkill(dev, false);
3503 b43info(dev->wl, "Radio turned on by software\n"); 3503 b43info(dev->wl, "Radio turned on by software\n");
3504 if (!dev->radio_hw_enable) { 3504 if (!dev->radio_hw_enable) {
@@ -4339,6 +4339,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
4339 wl->beacon0_uploaded = 0; 4339 wl->beacon0_uploaded = 0;
4340 wl->beacon1_uploaded = 0; 4340 wl->beacon1_uploaded = 0;
4341 wl->beacon_templates_virgin = 1; 4341 wl->beacon_templates_virgin = 1;
4342 wl->radio_enabled = 1;
4342 4343
4343 mutex_lock(&wl->mutex); 4344 mutex_lock(&wl->mutex);
4344 4345
@@ -4378,6 +4379,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4378 if (b43_status(dev) >= B43_STAT_STARTED) 4379 if (b43_status(dev) >= B43_STAT_STARTED)
4379 b43_wireless_core_stop(dev); 4380 b43_wireless_core_stop(dev);
4380 b43_wireless_core_exit(dev); 4381 b43_wireless_core_exit(dev);
4382 wl->radio_enabled = 0;
4381 mutex_unlock(&wl->mutex); 4383 mutex_unlock(&wl->mutex);
4382 4384
4383 cancel_work_sync(&(wl->txpower_adjust_work)); 4385 cancel_work_sync(&(wl->txpower_adjust_work));
@@ -4560,6 +4562,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4560 B43_WARN_ON(1); 4562 B43_WARN_ON(1);
4561 4563
4562 dev->phy.gmode = have_2ghz_phy; 4564 dev->phy.gmode = have_2ghz_phy;
4565 dev->phy.radio_on = 1;
4563 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; 4566 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
4564 b43_wireless_core_reset(dev, tmp); 4567 b43_wireless_core_reset(dev, tmp);
4565 4568
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 3cfc30307a27..6c3a74964ab8 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -35,6 +35,7 @@
35 35
36static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = { 36static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = {
37 PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448), 37 PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
38 PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
38 PCMCIA_DEVICE_NULL, 39 PCMCIA_DEVICE_NULL,
39}; 40};
40 41
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 77fda148ac46..038baa8869e2 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -607,6 +607,7 @@ struct b43legacy_wl {
607 u8 nr_devs; 607 u8 nr_devs;
608 608
609 bool radiotap_enabled; 609 bool radiotap_enabled;
610 bool radio_enabled;
610 611
611 /* The beacon we are currently using (AP or IBSS mode). 612 /* The beacon we are currently using (AP or IBSS mode).
612 * This beacon stuff is protected by the irq_lock. */ 613 * This beacon stuff is protected by the irq_lock. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index e5136fb65ddd..c4973c1942bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2689,8 +2689,8 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2689 /* Antennas for RX and management frame TX. */ 2689 /* Antennas for RX and management frame TX. */
2690 b43legacy_mgmtframe_txantenna(dev, antenna_tx); 2690 b43legacy_mgmtframe_txantenna(dev, antenna_tx);
2691 2691
2692 if (!!conf->radio_enabled != phy->radio_on) { 2692 if (wl->radio_enabled != phy->radio_on) {
2693 if (conf->radio_enabled) { 2693 if (wl->radio_enabled) {
2694 b43legacy_radio_turn_on(dev); 2694 b43legacy_radio_turn_on(dev);
2695 b43legacyinfo(dev->wl, "Radio turned on by software\n"); 2695 b43legacyinfo(dev->wl, "Radio turned on by software\n");
2696 if (!dev->radio_hw_enable) 2696 if (!dev->radio_hw_enable)
@@ -3441,6 +3441,7 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
3441 wl->beacon0_uploaded = 0; 3441 wl->beacon0_uploaded = 0;
3442 wl->beacon1_uploaded = 0; 3442 wl->beacon1_uploaded = 0;
3443 wl->beacon_templates_virgin = 1; 3443 wl->beacon_templates_virgin = 1;
3444 wl->radio_enabled = 1;
3444 3445
3445 mutex_lock(&wl->mutex); 3446 mutex_lock(&wl->mutex);
3446 3447
@@ -3479,6 +3480,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
3479 if (b43legacy_status(dev) >= B43legacy_STAT_STARTED) 3480 if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
3480 b43legacy_wireless_core_stop(dev); 3481 b43legacy_wireless_core_stop(dev);
3481 b43legacy_wireless_core_exit(dev); 3482 b43legacy_wireless_core_exit(dev);
3483 wl->radio_enabled = 0;
3482 mutex_unlock(&wl->mutex); 3484 mutex_unlock(&wl->mutex);
3483} 3485}
3484 3486
@@ -3620,6 +3622,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
3620 have_bphy = 1; 3622 have_bphy = 1;
3621 3623
3622 dev->phy.gmode = (have_gphy || have_bphy); 3624 dev->phy.gmode = (have_gphy || have_bphy);
3625 dev->phy.radio_on = 1;
3623 tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; 3626 tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
3624 b43legacy_wireless_core_reset(dev, tmp); 3627 b43legacy_wireless_core_reset(dev, tmp);
3625 3628
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index fbb3a573463e..2de6471d4be9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -112,7 +112,7 @@ enum iwl3945_antenna {
112#define IWL_TX_FIFO_NONE 7 112#define IWL_TX_FIFO_NONE 7
113 113
114/* Minimum number of queues. MAX_NUM is defined in hw specific files */ 114/* Minimum number of queues. MAX_NUM is defined in hw specific files */
115#define IWL_MIN_NUM_QUEUES 4 115#define IWL39_MIN_NUM_QUEUES 4
116 116
117#define IEEE80211_DATA_LEN 2304 117#define IEEE80211_DATA_LEN 2304
118#define IEEE80211_4ADDR_LEN 30 118#define IEEE80211_4ADDR_LEN 30
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 6d1519e1f011..355f50ea7fef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2675,12 +2675,10 @@ static ssize_t show_power_level(struct device *d,
2675 struct device_attribute *attr, char *buf) 2675 struct device_attribute *attr, char *buf)
2676{ 2676{
2677 struct iwl_priv *priv = dev_get_drvdata(d); 2677 struct iwl_priv *priv = dev_get_drvdata(d);
2678 int mode = priv->power_data.user_power_setting;
2679 int level = priv->power_data.power_mode; 2678 int level = priv->power_data.power_mode;
2680 char *p = buf; 2679 char *p = buf;
2681 2680
2682 p += sprintf(p, "INDEX:%d\t", level); 2681 p += sprintf(p, "%d\n", level);
2683 p += sprintf(p, "USER:%d\n", mode);
2684 return p - buf + 1; 2682 return p - buf + 1;
2685} 2683}
2686 2684
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 6ab07165ea28..18b135f510e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1332,6 +1332,9 @@ int iwl_setup_mac(struct iwl_priv *priv)
1332 1332
1333 hw->wiphy->custom_regulatory = true; 1333 hw->wiphy->custom_regulatory = true;
1334 1334
1335 /* Firmware does not support this */
1336 hw->wiphy->disable_beacon_hints = true;
1337
1335 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 1338 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
1336 /* we create the 802.11 header and a zero-length SSID element */ 1339 /* we create the 802.11 header and a zero-length SSID element */
1337 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; 1340 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 11e08c068917..ca00cc8ad4c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -308,18 +308,18 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
308 return -ENODATA; 308 return -ENODATA;
309 } 309 }
310 310
311 ptr = priv->eeprom;
312 if (!ptr) {
313 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
314 return -ENOMEM;
315 }
316
311 /* 4 characters for byte 0xYY */ 317 /* 4 characters for byte 0xYY */
312 buf = kzalloc(buf_size, GFP_KERNEL); 318 buf = kzalloc(buf_size, GFP_KERNEL);
313 if (!buf) { 319 if (!buf) {
314 IWL_ERR(priv, "Can not allocate Buffer\n"); 320 IWL_ERR(priv, "Can not allocate Buffer\n");
315 return -ENOMEM; 321 return -ENOMEM;
316 } 322 }
317
318 ptr = priv->eeprom;
319 if (!ptr) {
320 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
321 return -ENOMEM;
322 }
323 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", 323 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n",
324 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) 324 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
325 ? "OTP" : "EEPROM"); 325 ? "OTP" : "EEPROM");
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index e2d620f0b6e8..650e20af20fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -258,8 +258,10 @@ struct iwl_channel_info {
258#define IWL_TX_FIFO_HCCA_2 6 258#define IWL_TX_FIFO_HCCA_2 6
259#define IWL_TX_FIFO_NONE 7 259#define IWL_TX_FIFO_NONE 7
260 260
261/* Minimum number of queues. MAX_NUM is defined in hw specific files */ 261/* Minimum number of queues. MAX_NUM is defined in hw specific files.
262#define IWL_MIN_NUM_QUEUES 4 262 * Set the minimum to accommodate the 4 standard TX queues, 1 command
263 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
264#define IWL_MIN_NUM_QUEUES 10
263 265
264/* Power management (not Tx power) structures */ 266/* Power management (not Tx power) structures */
265 267
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 2addf735b193..ffd5c61a7553 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -566,6 +566,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
566 unsigned long flags; 566 unsigned long flags;
567 567
568 spin_lock_irqsave(&priv->sta_lock, flags); 568 spin_lock_irqsave(&priv->sta_lock, flags);
569 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
570 keyconf->keyidx);
569 571
570 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) 572 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
571 IWL_ERR(priv, "index %d not used in uCode key table.\n", 573 IWL_ERR(priv, "index %d not used in uCode key table.\n",
@@ -573,6 +575,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
573 575
574 priv->default_wep_key--; 576 priv->default_wep_key--;
575 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 577 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
578 if (iwl_is_rfkill(priv)) {
579 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
580 spin_unlock_irqrestore(&priv->sta_lock, flags);
581 return 0;
582 }
576 ret = iwl_send_static_wepkey_cmd(priv, 1); 583 ret = iwl_send_static_wepkey_cmd(priv, 1);
577 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", 584 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
578 keyconf->keyidx, ret); 585 keyconf->keyidx, ret);
@@ -853,6 +860,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
853 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 860 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
854 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 861 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
855 862
863 if (iwl_is_rfkill(priv)) {
864 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
865 spin_unlock_irqrestore(&priv->sta_lock, flags);
866 return 0;
867 }
856 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 868 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
857 spin_unlock_irqrestore(&priv->sta_lock, flags); 869 spin_unlock_irqrestore(&priv->sta_lock, flags);
858 return ret; 870 return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 85ae7a62109c..2e89040e63be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -720,8 +720,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
720 goto drop_unlock; 720 goto drop_unlock;
721 } 721 }
722 722
723 spin_unlock_irqrestore(&priv->lock, flags);
724
725 hdr_len = ieee80211_hdrlen(fc); 723 hdr_len = ieee80211_hdrlen(fc);
726 724
727 /* Find (or create) index into station table for destination station */ 725 /* Find (or create) index into station table for destination station */
@@ -729,7 +727,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
729 if (sta_id == IWL_INVALID_STATION) { 727 if (sta_id == IWL_INVALID_STATION) {
730 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 728 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
731 hdr->addr1); 729 hdr->addr1);
732 goto drop; 730 goto drop_unlock;
733 } 731 }
734 732
735 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); 733 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
@@ -750,14 +748,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
750 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 748 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
751 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); 749 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
752 } 750 }
753 priv->stations[sta_id].tid[tid].tfds_in_queue++;
754 } 751 }
755 752
756 txq = &priv->txq[txq_id]; 753 txq = &priv->txq[txq_id];
757 q = &txq->q; 754 q = &txq->q;
758 txq->swq_id = swq_id; 755 txq->swq_id = swq_id;
759 756
760 spin_lock_irqsave(&priv->lock, flags); 757 if (unlikely(iwl_queue_space(q) < q->high_mark))
758 goto drop_unlock;
759
760 if (ieee80211_is_data_qos(fc))
761 priv->stations[sta_id].tid[tid].tfds_in_queue++;
761 762
762 /* Set up driver data for this TFD */ 763 /* Set up driver data for this TFD */
763 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 764 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -872,7 +873,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
872 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); 873 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
873 874
874 /* Set up entry for this TFD in Tx byte-count array */ 875 /* Set up entry for this TFD in Tx byte-count array */
875 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 876 if (info->flags & IEEE80211_TX_CTL_AMPDU)
877 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
876 le16_to_cpu(tx_cmd->len)); 878 le16_to_cpu(tx_cmd->len));
877 879
878 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, 880 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
@@ -901,7 +903,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
901 903
902drop_unlock: 904drop_unlock:
903 spin_unlock_irqrestore(&priv->lock, flags); 905 spin_unlock_irqrestore(&priv->lock, flags);
904drop:
905 return -1; 906 return -1;
906} 907}
907EXPORT_SYMBOL(iwl_tx_skb); 908EXPORT_SYMBOL(iwl_tx_skb);
@@ -1170,6 +1171,8 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1170 IWL_ERR(priv, "Start AGG on invalid station\n"); 1171 IWL_ERR(priv, "Start AGG on invalid station\n");
1171 return -ENXIO; 1172 return -ENXIO;
1172 } 1173 }
1174 if (unlikely(tid >= MAX_TID_COUNT))
1175 return -EINVAL;
1173 1176
1174 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 1177 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1175 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); 1178 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index cb9bd4c8f25e..523843369ca2 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -3643,12 +3643,10 @@ static ssize_t show_power_level(struct device *d,
3643 struct device_attribute *attr, char *buf) 3643 struct device_attribute *attr, char *buf)
3644{ 3644{
3645 struct iwl_priv *priv = dev_get_drvdata(d); 3645 struct iwl_priv *priv = dev_get_drvdata(d);
3646 int mode = priv->power_data.user_power_setting;
3647 int level = priv->power_data.power_mode; 3646 int level = priv->power_data.power_mode;
3648 char *p = buf; 3647 char *p = buf;
3649 3648
3650 p += sprintf(p, "INDEX:%d\t", level); 3649 p += sprintf(p, "%d\n", level);
3651 p += sprintf(p, "USER:%d\n", mode);
3652 return p - buf + 1; 3650 return p - buf + 1;
3653} 3651}
3654 3652
@@ -3970,6 +3968,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3970 3968
3971 hw->wiphy->custom_regulatory = true; 3969 hw->wiphy->custom_regulatory = true;
3972 3970
3971 /* Firmware does not support this */
3972 hw->wiphy->disable_beacon_hints = true;
3973
3973 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3974 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3974 /* we create the 802.11 header and a zero-length SSID element */ 3975 /* we create the 802.11 header and a zero-length SSID element */
3975 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; 3976 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
@@ -4020,10 +4021,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4020 SET_IEEE80211_DEV(hw, &pdev->dev); 4021 SET_IEEE80211_DEV(hw, &pdev->dev);
4021 4022
4022 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || 4023 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
4023 (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) { 4024 (iwl3945_mod_params.num_of_queues < IWL39_MIN_NUM_QUEUES)) {
4024 IWL_ERR(priv, 4025 IWL_ERR(priv,
4025 "invalid queues_num, should be between %d and %d\n", 4026 "invalid queues_num, should be between %d and %d\n",
4026 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); 4027 IWL39_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
4027 err = -EINVAL; 4028 err = -EINVAL;
4028 goto out_ieee80211_free_hw; 4029 goto out_ieee80211_free_hw;
4029 } 4030 }
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 1eccb6df46dd..030401d367d3 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -4,6 +4,15 @@ config IWM
4 depends on CFG80211 4 depends on CFG80211
5 select WIRELESS_EXT 5 select WIRELESS_EXT
6 select FW_LOADER 6 select FW_LOADER
7 help
8 The Intel Wireless Multicomm 3200 hardware is a combo
9 card with GPS, Bluetooth, WiMax and 802.11 radios. It
10 runs over SDIO and is typically found on Moorestown
11 based platform. This driver takes care of the 802.11
12 part, which is a fullmac one.
13
14 If you choose to build it as a module, it'll be called
15 iwmc3200wifi.ko.
7 16
8config IWM_DEBUG 17config IWM_DEBUG
9 bool "Enable full debugging output in iwmc3200wifi" 18 bool "Enable full debugging output in iwmc3200wifi"
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 834a7f544e5d..e2334d123599 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -220,6 +220,7 @@ int iwm_store_rxiq_calib_result(struct iwm_priv *iwm)
220 eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); 220 eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
221 if (IS_ERR(eeprom_rxiq)) { 221 if (IS_ERR(eeprom_rxiq)) {
222 IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); 222 IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n");
223 kfree(rxiq);
223 return PTR_ERR(eeprom_rxiq); 224 return PTR_ERR(eeprom_rxiq);
224 } 225 }
225 226
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index aaa20c6885c8..bf294e41753b 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -106,10 +106,8 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
106 int ret = 0; 106 int ret = 0;
107 107
108 wdev = iwm_wdev_alloc(sizeof_bus, dev); 108 wdev = iwm_wdev_alloc(sizeof_bus, dev);
109 if (!wdev) { 109 if (IS_ERR(wdev))
110 dev_err(dev, "no memory for wireless device instance\n"); 110 return wdev;
111 return ERR_PTR(-ENOMEM);
112 }
113 111
114 iwm = wdev_to_iwm(wdev); 112 iwm = wdev_to_iwm(wdev);
115 iwm->bus_ops = if_ops; 113 iwm->bus_ops = if_ops;
@@ -151,8 +149,8 @@ void iwm_if_free(struct iwm_priv *iwm)
151 return; 149 return;
152 150
153 free_netdev(iwm_to_ndev(iwm)); 151 free_netdev(iwm_to_ndev(iwm));
154 iwm_wdev_free(iwm);
155 iwm_priv_deinit(iwm); 152 iwm_priv_deinit(iwm);
153 iwm_wdev_free(iwm);
156} 154}
157 155
158int iwm_if_add(struct iwm_priv *iwm) 156int iwm_if_add(struct iwm_priv *iwm)
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
index 9a5408e7d94a..5c6968101f0d 100644
--- a/drivers/net/wireless/libertas/11d.c
+++ b/drivers/net/wireless/libertas/11d.c
@@ -47,7 +47,7 @@ static u8 lbs_region_2_code(u8 *region)
47{ 47{
48 u8 i; 48 u8 i;
49 49
50 for (i = 0; region[i] && i < COUNTRY_CODE_LEN; i++) 50 for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
51 region[i] = toupper(region[i]); 51 region[i] = toupper(region[i]);
52 52
53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) { 53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
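A minimal user-space sketch (plain C, not driver code) of the loop-ordering fix in the libertas/11d.c hunk above: the index bound has to be tested before the element is read, otherwise the final pass dereferences one slot past a buffer that is exactly COUNTRY_CODE_LEN bytes long.

#include <stdio.h>
#include <ctype.h>

#define COUNTRY_CODE_LEN 3

static void upcase_region(unsigned char region[COUNTRY_CODE_LEN])
{
	int i;

	/* The old order "region[i] && i < COUNTRY_CODE_LEN" still reads
	 * region[COUNTRY_CODE_LEN] on the last test; checking the bound
	 * first keeps every access inside the buffer. */
	for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
		region[i] = toupper(region[i]);
}

int main(void)
{
	unsigned char code[COUNTRY_CODE_LEN] = "us";	/* 'u', 's', NUL */

	upcase_region(code);
	printf("%s\n", code);	/* prints "US" */
	return 0;
}
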
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index b9b374119033..d6997371c27e 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -1,6 +1,7 @@
1/* Copyright (C) 2006, Red Hat, Inc. */ 1/* Copyright (C) 2006, Red Hat, Inc. */
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/kernel.h>
4#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
5#include <linux/ieee80211.h> 6#include <linux/ieee80211.h>
6#include <linux/if_arp.h> 7#include <linux/if_arp.h>
@@ -43,21 +44,21 @@ static int get_common_rates(struct lbs_private *priv,
43 u16 *rates_size) 44 u16 *rates_size)
44{ 45{
45 u8 *card_rates = lbs_bg_rates; 46 u8 *card_rates = lbs_bg_rates;
46 size_t num_card_rates = sizeof(lbs_bg_rates);
47 int ret = 0, i, j; 47 int ret = 0, i, j;
48 u8 tmp[30]; 48 u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)];
49 size_t tmp_size = 0; 49 size_t tmp_size = 0;
50 50
51 /* For each rate in card_rates that exists in rate1, copy to tmp */ 51 /* For each rate in card_rates that exists in rate1, copy to tmp */
52 for (i = 0; card_rates[i] && (i < num_card_rates); i++) { 52 for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) {
53 for (j = 0; rates[j] && (j < *rates_size); j++) { 53 for (j = 0; j < *rates_size && rates[j]; j++) {
54 if (rates[j] == card_rates[i]) 54 if (rates[j] == card_rates[i])
55 tmp[tmp_size++] = card_rates[i]; 55 tmp[tmp_size++] = card_rates[i];
56 } 56 }
57 } 57 }
58 58
59 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); 59 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
60 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates); 60 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates,
61 ARRAY_SIZE(lbs_bg_rates));
61 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); 62 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
62 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); 63 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
63 64
@@ -69,10 +70,7 @@ static int get_common_rates(struct lbs_private *priv,
69 lbs_pr_alert("Previously set fixed data rate %#x isn't " 70 lbs_pr_alert("Previously set fixed data rate %#x isn't "
70 "compatible with the network.\n", priv->cur_rate); 71 "compatible with the network.\n", priv->cur_rate);
71 ret = -1; 72 ret = -1;
72 goto done;
73 } 73 }
74 ret = 0;
75
76done: 74done:
77 memset(rates, 0, *rates_size); 75 memset(rates, 0, *rates_size);
78 *rates_size = min_t(int, tmp_size, *rates_size); 76 *rates_size = min_t(int, tmp_size, *rates_size);
@@ -322,7 +320,7 @@ static int lbs_associate(struct lbs_private *priv,
322 rates = (struct mrvl_ie_rates_param_set *) pos; 320 rates = (struct mrvl_ie_rates_param_set *) pos;
323 rates->header.type = cpu_to_le16(TLV_TYPE_RATES); 321 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
324 memcpy(&rates->rates, &bss->rates, MAX_RATES); 322 memcpy(&rates->rates, &bss->rates, MAX_RATES);
325 tmplen = MAX_RATES; 323 tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES);
326 if (get_common_rates(priv, rates->rates, &tmplen)) { 324 if (get_common_rates(priv, rates->rates, &tmplen)) {
327 ret = -1; 325 ret = -1;
328 goto done; 326 goto done;
@@ -598,7 +596,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
598 596
599 /* Copy Data rates from the rates recorded in scan response */ 597 /* Copy Data rates from the rates recorded in scan response */
600 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); 598 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
601 ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES); 599 ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES);
602 memcpy(cmd.bss.rates, bss->rates, ratesize); 600 memcpy(cmd.bss.rates, bss->rates, ratesize);
603 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { 601 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
604 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n"); 602 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
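The assoc.c changes above trade sizeof() and hard-coded lengths for ARRAY_SIZE(), so the loop bound always means "number of elements" rather than "number of bytes". A small, self-contained C sketch of the idiom; the rate values are made up and not taken from the driver.

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint8_t bg_rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12 };

int main(void)
{
	size_t i;

	/* sizeof() counts bytes, ARRAY_SIZE() counts elements; for uint8_t
	 * they happen to match, but only the latter stays correct if the
	 * element type ever grows. */
	printf("bytes=%zu elements=%zu\n",
	       sizeof(bg_rates), ARRAY_SIZE(bg_rates));

	for (i = 0; i < ARRAY_SIZE(bg_rates) && bg_rates[i]; i++)
		printf("rate 0x%02x\n", bg_rates[i]);
	return 0;
}
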
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 01db705a38ec..685098148e10 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -135,8 +135,14 @@ int lbs_update_hw_spec(struct lbs_private *priv)
135 /* Clamp region code to 8-bit since FW spec indicates that it should 135 /* Clamp region code to 8-bit since FW spec indicates that it should
136 * only ever be 8-bit, even though the field size is 16-bit. Some firmware 136 * only ever be 8-bit, even though the field size is 16-bit. Some firmware
137 * returns non-zero high 8 bits here. 137 * returns non-zero high 8 bits here.
138 *
139 * Firmware version 4.0.102 used in CF8381 has region code shifted. We
140 * need to check for this problem and handle it properly.
138 */ 141 */
139 priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; 142 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V4)
143 priv->regioncode = (le16_to_cpu(cmd.regioncode) >> 8) & 0xFF;
144 else
145 priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF;
140 146
141 for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { 147 for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) {
142 /* use the region code to search for the index */ 148 /* use the region code to search for the index */
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 48da157d6cda..72f3479a4d70 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -234,6 +234,8 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
234/** Mesh enable bit in FW capability */ 234/** Mesh enable bit in FW capability */
235#define MESH_CAPINFO_ENABLE_MASK (1<<16) 235#define MESH_CAPINFO_ENABLE_MASK (1<<16)
236 236
237/** FW definition from Marvell v4 */
238#define MRVL_FW_V4 (0x04)
237/** FW definition from Marvell v5 */ 239/** FW definition from Marvell v5 */
238#define MRVL_FW_V5 (0x05) 240#define MRVL_FW_V5 (0x05)
239/** FW definition from Marvell v10 */ 241/** FW definition from Marvell v10 */
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 601b54249677..6c95af3023cc 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -5,6 +5,7 @@
5 * for sending scan commands to the firmware. 5 * for sending scan commands to the firmware.
6 */ 6 */
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/kernel.h>
8#include <linux/etherdevice.h> 9#include <linux/etherdevice.h>
9#include <linux/if_arp.h> 10#include <linux/if_arp.h>
10#include <asm/unaligned.h> 11#include <asm/unaligned.h>
@@ -876,7 +877,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
876 iwe.u.bitrate.disabled = 0; 877 iwe.u.bitrate.disabled = 0;
877 iwe.u.bitrate.value = 0; 878 iwe.u.bitrate.value = 0;
878 879
879 for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { 880 for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) {
880 /* Bit rate given in 500 kb/s units */ 881 /* Bit rate given in 500 kb/s units */
881 iwe.u.bitrate.value = bss->rates[j] * 500000; 882 iwe.u.bitrate.value = bss->rates[j] * 500000;
882 current_val = iwe_stream_add_value(info, start, current_val, 883 current_val = iwe_stream_add_value(info, start, current_val,
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e789c6e9938c..7916ca3f84c8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -418,6 +418,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
418 continue; 418 continue;
419 419
420 if (!data2->started || !hwsim_ps_rx_ok(data2, skb) || 420 if (!data2->started || !hwsim_ps_rx_ok(data2, skb) ||
421 !data->channel || !data2->channel ||
421 data->channel->center_freq != data2->channel->center_freq || 422 data->channel->center_freq != data2->channel->center_freq ||
422 !(data->group & data2->group)) 423 !(data->group & data2->group))
423 continue; 424 continue;
@@ -708,7 +709,7 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
708static void mac80211_hwsim_free(void) 709static void mac80211_hwsim_free(void)
709{ 710{
710 struct list_head tmplist, *i, *tmp; 711 struct list_head tmplist, *i, *tmp;
711 struct mac80211_hwsim_data *data; 712 struct mac80211_hwsim_data *data, *tmpdata;
712 713
713 INIT_LIST_HEAD(&tmplist); 714 INIT_LIST_HEAD(&tmplist);
714 715
@@ -717,7 +718,7 @@ static void mac80211_hwsim_free(void)
717 list_move(i, &tmplist); 718 list_move(i, &tmplist);
718 spin_unlock_bh(&hwsim_radio_lock); 719 spin_unlock_bh(&hwsim_radio_lock);
719 720
720 list_for_each_entry(data, &tmplist, list) { 721 list_for_each_entry_safe(data, tmpdata, &tmplist, list) {
721 debugfs_remove(data->debugfs_group); 722 debugfs_remove(data->debugfs_group);
722 debugfs_remove(data->debugfs_ps); 723 debugfs_remove(data->debugfs_ps);
723 debugfs_remove(data->debugfs); 724 debugfs_remove(data->debugfs);
@@ -1166,8 +1167,8 @@ static void __exit exit_mac80211_hwsim(void)
1166{ 1167{
1167 printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); 1168 printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
1168 1169
1169 unregister_netdev(hwsim_mon);
1170 mac80211_hwsim_free(); 1170 mac80211_hwsim_free();
1171 unregister_netdev(hwsim_mon);
1171} 1172}
1172 1173
1173 1174
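The mac80211_hwsim hunk above switches to list_for_each_entry_safe() because the loop body tears the current entry down. The same rule in portable C, as a rough analogue: remember the successor before freeing the node you are standing on.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

static void free_all(struct node *head)
{
	struct node *cur, *tmp;

	for (cur = head; cur; cur = tmp) {
		tmp = cur->next;	/* grab the successor first */
		free(cur);		/* now the current node may go away */
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->value = i;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}
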
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 345593c4accb..a370e510f19f 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -2521,6 +2521,8 @@ static const struct net_device_ops orinoco_netdev_ops = {
2521 .ndo_start_xmit = orinoco_xmit, 2521 .ndo_start_xmit = orinoco_xmit,
2522 .ndo_set_multicast_list = orinoco_set_multicast_list, 2522 .ndo_set_multicast_list = orinoco_set_multicast_list,
2523 .ndo_change_mtu = orinoco_change_mtu, 2523 .ndo_change_mtu = orinoco_change_mtu,
2524 .ndo_set_mac_address = eth_mac_addr,
2525 .ndo_validate_addr = eth_validate_addr,
2524 .ndo_tx_timeout = orinoco_tx_timeout, 2526 .ndo_tx_timeout = orinoco_tx_timeout,
2525 .ndo_get_stats = orinoco_get_stats, 2527 .ndo_get_stats = orinoco_get_stats,
2526}; 2528};
@@ -2555,7 +2557,6 @@ struct net_device
2555 priv->wireless_data.spy_data = &priv->spy_data; 2557 priv->wireless_data.spy_data = &priv->spy_data;
2556 dev->wireless_data = &priv->wireless_data; 2558 dev->wireless_data = &priv->wireless_data;
2557#endif 2559#endif
2558 /* we use the default eth_mac_addr for setting the MAC addr */
2559 2560
2560 /* Reserve space in skb for the SNAP header */ 2561 /* Reserve space in skb for the SNAP header */
2561 dev->hard_header_len += ENCAPS_OVERHEAD; 2562 dev->hard_header_len += ENCAPS_OVERHEAD;
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index b618bd14583f..22ca122bd798 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -823,30 +823,30 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
823 struct p54_tx_info *range; 823 struct p54_tx_info *range;
824 unsigned long flags; 824 unsigned long flags;
825 825
826 if (unlikely(!skb || !dev || skb_queue_empty(&priv->tx_queue))) 826 if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue)))
827 return; 827 return;
828 828
829 /* There used to be a check here to see if the SKB was on the 829 /*
830 * TX queue or not. This can never happen because all SKBs we 830 * don't try to free an already unlinked skb
831 * see here successfully went through p54_assign_address()
832 * which means the SKB is on the ->tx_queue.
833 */ 831 */
832 if (unlikely((!skb->next) || (!skb->prev)))
833 return;
834 834
835 spin_lock_irqsave(&priv->tx_queue.lock, flags); 835 spin_lock_irqsave(&priv->tx_queue.lock, flags);
836 info = IEEE80211_SKB_CB(skb); 836 info = IEEE80211_SKB_CB(skb);
837 range = (void *)info->rate_driver_data; 837 range = (void *)info->rate_driver_data;
838 if (!skb_queue_is_first(&priv->tx_queue, skb)) { 838 if (skb->prev != (struct sk_buff *)&priv->tx_queue) {
839 struct ieee80211_tx_info *ni; 839 struct ieee80211_tx_info *ni;
840 struct p54_tx_info *mr; 840 struct p54_tx_info *mr;
841 841
842 ni = IEEE80211_SKB_CB(skb_queue_prev(&priv->tx_queue, skb)); 842 ni = IEEE80211_SKB_CB(skb->prev);
843 mr = (struct p54_tx_info *)ni->rate_driver_data; 843 mr = (struct p54_tx_info *)ni->rate_driver_data;
844 } 844 }
845 if (!skb_queue_is_last(&priv->tx_queue, skb)) { 845 if (skb->next != (struct sk_buff *)&priv->tx_queue) {
846 struct ieee80211_tx_info *ni; 846 struct ieee80211_tx_info *ni;
847 struct p54_tx_info *mr; 847 struct p54_tx_info *mr;
848 848
849 ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, skb)); 849 ni = IEEE80211_SKB_CB(skb->next);
850 mr = (struct p54_tx_info *)ni->rate_driver_data; 850 mr = (struct p54_tx_info *)ni->rate_driver_data;
851 } 851 }
852 __skb_unlink(skb, &priv->tx_queue); 852 __skb_unlink(skb, &priv->tx_queue);
@@ -864,13 +864,15 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
864 unsigned long flags; 864 unsigned long flags;
865 865
866 spin_lock_irqsave(&priv->tx_queue.lock, flags); 866 spin_lock_irqsave(&priv->tx_queue.lock, flags);
867 skb_queue_walk(&priv->tx_queue, entry) { 867 entry = priv->tx_queue.next;
868 while (entry != (struct sk_buff *)&priv->tx_queue) {
868 struct p54_hdr *hdr = (struct p54_hdr *) entry->data; 869 struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
869 870
870 if (hdr->req_id == req_id) { 871 if (hdr->req_id == req_id) {
871 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 872 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
872 return entry; 873 return entry;
873 } 874 }
875 entry = entry->next;
874 } 876 }
875 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 877 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
876 return NULL; 878 return NULL;
@@ -888,33 +890,36 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
888 int count, idx; 890 int count, idx;
889 891
890 spin_lock_irqsave(&priv->tx_queue.lock, flags); 892 spin_lock_irqsave(&priv->tx_queue.lock, flags);
891 skb_queue_walk(&priv->tx_queue, entry) { 893 entry = (struct sk_buff *) priv->tx_queue.next;
894 while (entry != (struct sk_buff *)&priv->tx_queue) {
892 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 895 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
893 struct p54_hdr *entry_hdr; 896 struct p54_hdr *entry_hdr;
894 struct p54_tx_data *entry_data; 897 struct p54_tx_data *entry_data;
895 unsigned int pad = 0, frame_len; 898 unsigned int pad = 0, frame_len;
896 899
897 range = (void *)info->rate_driver_data; 900 range = (void *)info->rate_driver_data;
898 if (range->start_addr != addr) 901 if (range->start_addr != addr) {
902 entry = entry->next;
899 continue; 903 continue;
904 }
900 905
901 if (!skb_queue_is_last(&priv->tx_queue, entry)) { 906 if (entry->next != (struct sk_buff *)&priv->tx_queue) {
902 struct ieee80211_tx_info *ni; 907 struct ieee80211_tx_info *ni;
903 struct p54_tx_info *mr; 908 struct p54_tx_info *mr;
904 909
905 ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, 910 ni = IEEE80211_SKB_CB(entry->next);
906 entry));
907 mr = (struct p54_tx_info *)ni->rate_driver_data; 911 mr = (struct p54_tx_info *)ni->rate_driver_data;
908 } 912 }
909 913
910 __skb_unlink(entry, &priv->tx_queue); 914 __skb_unlink(entry, &priv->tx_queue);
911 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
912 915
913 frame_len = entry->len; 916 frame_len = entry->len;
914 entry_hdr = (struct p54_hdr *) entry->data; 917 entry_hdr = (struct p54_hdr *) entry->data;
915 entry_data = (struct p54_tx_data *) entry_hdr->data; 918 entry_data = (struct p54_tx_data *) entry_hdr->data;
916 priv->tx_stats[entry_data->hw_queue].len--; 919 if (priv->tx_stats[entry_data->hw_queue].len)
920 priv->tx_stats[entry_data->hw_queue].len--;
917 priv->stats.dot11ACKFailureCount += payload->tries - 1; 921 priv->stats.dot11ACKFailureCount += payload->tries - 1;
922 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
918 923
919 /* 924 /*
920 * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are 925 * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
@@ -1164,21 +1169,23 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
1164 } 1169 }
1165 } 1170 }
1166 1171
1167 skb_queue_walk(&priv->tx_queue, entry) { 1172 entry = priv->tx_queue.next;
1173 while (left--) {
1168 u32 hole_size; 1174 u32 hole_size;
1169 info = IEEE80211_SKB_CB(entry); 1175 info = IEEE80211_SKB_CB(entry);
1170 range = (void *)info->rate_driver_data; 1176 range = (void *)info->rate_driver_data;
1171 hole_size = range->start_addr - last_addr; 1177 hole_size = range->start_addr - last_addr;
1172 if (!target_skb && hole_size >= len) { 1178 if (!target_skb && hole_size >= len) {
1173 target_skb = skb_queue_prev(&priv->tx_queue, entry); 1179 target_skb = entry->prev;
1174 hole_size -= len; 1180 hole_size -= len;
1175 target_addr = last_addr; 1181 target_addr = last_addr;
1176 } 1182 }
1177 largest_hole = max(largest_hole, hole_size); 1183 largest_hole = max(largest_hole, hole_size);
1178 last_addr = range->end_addr; 1184 last_addr = range->end_addr;
1185 entry = entry->next;
1179 } 1186 }
1180 if (!target_skb && priv->rx_end - last_addr >= len) { 1187 if (!target_skb && priv->rx_end - last_addr >= len) {
1181 target_skb = skb_peek_tail(&priv->tx_queue); 1188 target_skb = priv->tx_queue.prev;
1182 largest_hole = max(largest_hole, priv->rx_end - last_addr - len); 1189 largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
1183 if (!skb_queue_empty(&priv->tx_queue)) { 1190 if (!skb_queue_empty(&priv->tx_queue)) {
1184 info = IEEE80211_SKB_CB(target_skb); 1191 info = IEEE80211_SKB_CB(target_skb);
@@ -2084,6 +2091,7 @@ out:
2084static void p54_stop(struct ieee80211_hw *dev) 2091static void p54_stop(struct ieee80211_hw *dev)
2085{ 2092{
2086 struct p54_common *priv = dev->priv; 2093 struct p54_common *priv = dev->priv;
2094 struct sk_buff *skb;
2087 2095
2088 mutex_lock(&priv->conf_mutex); 2096 mutex_lock(&priv->conf_mutex);
2089 priv->mode = NL80211_IFTYPE_UNSPECIFIED; 2097 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2098,7 +2106,8 @@ static void p54_stop(struct ieee80211_hw *dev)
2098 p54_tx_cancel(dev, priv->cached_beacon); 2106 p54_tx_cancel(dev, priv->cached_beacon);
2099 2107
2100 priv->stop(dev); 2108 priv->stop(dev);
2101 skb_queue_purge(&priv->tx_queue); 2109 while ((skb = skb_dequeue(&priv->tx_queue)))
2110 kfree_skb(skb);
2102 priv->cached_beacon = NULL; 2111 priv->cached_beacon = NULL;
2103 priv->tsf_high32 = priv->tsf_low32 = 0; 2112 priv->tsf_high32 = priv->tsf_low32 = 0;
2104 mutex_unlock(&priv->conf_mutex); 2113 mutex_unlock(&priv->conf_mutex);
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 83116baeb110..72c7dbd39d0a 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -635,7 +635,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
635 635
636 hw = p54_init_common(sizeof(*priv)); 636 hw = p54_init_common(sizeof(*priv));
637 if (!hw) { 637 if (!hw) {
638 dev_err(&priv->spi->dev, "could not alloc ieee80211_hw"); 638 dev_err(&spi->dev, "could not alloc ieee80211_hw");
639 return -ENOMEM; 639 return -ENOMEM;
640 } 640 }
641 641
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index b10b0383dfa5..698b11b1cadb 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2427,11 +2427,10 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2427 2427
2428#ifdef PCMCIA_DEBUG 2428#ifdef PCMCIA_DEBUG
2429 if (pc_debug > 3) { 2429 if (pc_debug > 3) {
2430 int i; 2430 print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ",
2431 printk(KERN_DEBUG "skb->data before untranslate"); 2431 DUMP_PREFIX_NONE, 16, 1,
2432 for (i = 0; i < 64; i++) 2432 skb->data, 64, true);
2433 printk("%02x ", skb->data[i]); 2433 printk(KERN_DEBUG
2434 printk("\n" KERN_DEBUG
2435 "type = %08x, xsap = %02x%02x%02x, org = %02x02x02x\n", 2434 "type = %08x, xsap = %02x%02x%02x, org = %02x02x02x\n",
2436 ntohs(type), psnap->dsap, psnap->ssap, psnap->ctrl, 2435 ntohs(type), psnap->dsap, psnap->ssap, psnap->ctrl,
2437 psnap->org[0], psnap->org[1], psnap->org[2]); 2436 psnap->org[0], psnap->org[1], psnap->org[2]);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 66daf68ff0ee..ce75426764a1 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1550,7 +1550,9 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1550 rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg); 1550 rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1551 rt2x00_set_chip(rt2x00dev, RT2570, value, reg); 1551 rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
1552 1552
1553 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0)) { 1553 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
1554 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
1555
1554 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1556 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1555 return -ENODEV; 1557 return -ENODEV;
1556 } 1558 }
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index b44253592243..cf9f899fe0e6 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -208,11 +208,12 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev)
208{ 208{
209 struct rtl8187_priv *priv = dev->priv; 209 struct rtl8187_priv *priv = dev->priv;
210 210
211 rtl8187_unregister_led(&priv->led_tx);
212 /* turn the LED off before exiting */ 211 /* turn the LED off before exiting */
213 queue_delayed_work(dev->workqueue, &priv->led_off, 0); 212 queue_delayed_work(dev->workqueue, &priv->led_off, 0);
214 cancel_delayed_work_sync(&priv->led_off); 213 cancel_delayed_work_sync(&priv->led_off);
214 cancel_delayed_work_sync(&priv->led_on);
215 rtl8187_unregister_led(&priv->led_rx); 215 rtl8187_unregister_led(&priv->led_rx);
216 rtl8187_unregister_led(&priv->led_tx);
216} 217}
217#endif /* def CONFIG_RTL8187_LED */ 218#endif /* def CONFIG_RTL8187_LED */
218 219
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 6af706408ac0..c6d300666ad8 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -3556,17 +3556,8 @@ wv_82593_config(struct net_device * dev)
3556 cfblk.rcvstop = TRUE; /* Enable Receive Stop Register */ 3556 cfblk.rcvstop = TRUE; /* Enable Receive Stop Register */
3557 3557
3558#ifdef DEBUG_I82593_SHOW 3558#ifdef DEBUG_I82593_SHOW
3559 { 3559 print_hex_dump(KERN_DEBUG, "wavelan_cs: config block: ", DUMP_PREFIX_NONE,
3560 u_char *c = (u_char *) &cfblk; 3560 16, 1, &cfblk, sizeof(struct i82593_conf_block), false);
3561 int i;
3562 printk(KERN_DEBUG "wavelan_cs: config block:");
3563 for(i = 0; i < sizeof(struct i82593_conf_block); i++,c++)
3564 {
3565 if((i % 16) == 0) printk("\n" KERN_DEBUG);
3566 printk("%02x ", *c);
3567 }
3568 printk("\n");
3569 }
3570#endif 3561#endif
3571 3562
3572 /* Copy the config block to the i82593 */ 3563 /* Copy the config block to the i82593 */
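Both conversions above (ray_cs.c and wavelan_cs.c) replace an open-coded hex loop with the kernel's print_hex_dump() helper. A hedged, kernel-only sketch of the call pattern; the function name, buffer, and length here are placeholders, not from the patch.

#include <linux/kernel.h>

static void dump_config_block(const void *buf, size_t len)
{
	/* 16 bytes per line, 1-byte groups, no trailing ASCII column. */
	print_hex_dump(KERN_DEBUG, "example: config block: ",
		       DUMP_PREFIX_NONE, 16, 1, buf, len, false);
}
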
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 40b07b988224..3bd3c779fff3 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -698,7 +698,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
698 && !mac->pass_ctrl) 698 && !mac->pass_ctrl)
699 return 0; 699 return 0;
700 700
701 fc = *(__le16 *)buffer; 701 fc = get_unaligned((__le16*)buffer);
702 need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); 702 need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);
703 703
704 skb = dev_alloc_skb(length + (need_padding ? 2 : 0)); 704 skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 14a19baff214..0e6e44689cc6 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -38,7 +38,6 @@ static struct usb_device_id usb_ids[] = {
38 /* ZD1211 */ 38 /* ZD1211 */
39 { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 }, 39 { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
40 { USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 }, 40 { USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
41 { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
42 { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 }, 41 { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
43 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 42 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
44 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, 43 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
@@ -61,6 +60,7 @@ static struct usb_device_id usb_ids[] = {
61 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
62 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
63 /* ZD1211B */ 62 /* ZD1211B */
63 { USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, 64 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B }, 65 { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
66 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 66 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
@@ -87,6 +87,7 @@ static struct usb_device_id usb_ids[] = {
87 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 87 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
88 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, 88 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
89 { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B }, 89 { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
90 { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
90 /* "Driverless" devices that need ejecting */ 91 /* "Driverless" devices that need ejecting */
91 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 92 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
92 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 93 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 3c7a5053f1da..a07580138e81 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -109,7 +109,7 @@ static int gx_fix;
109/* These identify the driver base version and may not be removed. */ 109/* These identify the driver base version and may not be removed. */
110static const char version[] __devinitconst = 110static const char version[] __devinitconst =
111 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n" 111 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
112 KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; 112 " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
113 113
114MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 114MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
115MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver"); 115MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
@@ -700,12 +700,15 @@ static void yellowfin_tx_timeout(struct net_device *dev)
700 int i; 700 int i;
701 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring); 701 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
702 for (i = 0; i < RX_RING_SIZE; i++) 702 for (i = 0; i < RX_RING_SIZE; i++)
703 printk(" %8.8x", yp->rx_ring[i].result_status); 703 printk(KERN_CONT " %8.8x",
704 printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring); 704 yp->rx_ring[i].result_status);
705 printk(KERN_CONT "\n");
706 printk(KERN_WARNING" Tx ring %p: ", yp->tx_ring);
705 for (i = 0; i < TX_RING_SIZE; i++) 707 for (i = 0; i < TX_RING_SIZE; i++)
706 printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs, 708 printk(KERN_CONT " %4.4x /%8.8x",
707 yp->tx_ring[i].result_status); 709 yp->tx_status[i].tx_errs,
708 printk("\n"); 710 yp->tx_ring[i].result_status);
711 printk(KERN_CONT "\n");
709 } 712 }
710 713
711 /* If the hardware is found to hang regularly, we will update the code 714 /* If the hardware is found to hang regularly, we will update the code
@@ -1216,20 +1219,20 @@ static int yellowfin_close(struct net_device *dev)
1216 1219
1217#if defined(__i386__) 1220#if defined(__i386__)
1218 if (yellowfin_debug > 2) { 1221 if (yellowfin_debug > 2) {
1219 printk("\n"KERN_DEBUG" Tx ring at %8.8llx:\n", 1222 printk(KERN_DEBUG" Tx ring at %8.8llx:\n",
1220 (unsigned long long)yp->tx_ring_dma); 1223 (unsigned long long)yp->tx_ring_dma);
1221 for (i = 0; i < TX_RING_SIZE*2; i++) 1224 for (i = 0; i < TX_RING_SIZE*2; i++)
1222 printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n", 1225 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
1223 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ', 1226 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1224 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr, 1227 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1225 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status); 1228 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1226 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status); 1229 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1227 for (i = 0; i < TX_RING_SIZE; i++) 1230 for (i = 0; i < TX_RING_SIZE; i++)
1228 printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n", 1231 printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n",
1229 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs, 1232 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1230 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused); 1233 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1231 1234
1232 printk("\n"KERN_DEBUG " Rx ring %8.8llx:\n", 1235 printk(KERN_DEBUG " Rx ring %8.8llx:\n",
1233 (unsigned long long)yp->rx_ring_dma); 1236 (unsigned long long)yp->rx_ring_dma);
1234 for (i = 0; i < RX_RING_SIZE; i++) { 1237 for (i = 0; i < RX_RING_SIZE; i++) {
1235 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n", 1238 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
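The yellowfin hunks adopt the printk continuation convention: the first fragment of a logical line carries a log level, and later fragments on the same line use KERN_CONT instead of embedding "\n" KERN_xxx in the format string. A short kernel-style sketch with illustrative names only.

#include <linux/kernel.h>

static void dump_ring_status(const u32 *status, int n)
{
	int i;

	printk(KERN_WARNING "Rx ring status:");
	for (i = 0; i < n; i++)
		printk(KERN_CONT " %8.8x", status[i]);
	printk(KERN_CONT "\n");
}
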
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index aee967d7f760..bacaa536fd51 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -9,6 +9,10 @@
9 * out of the OpenFirmware device tree and using it to populate an mii_bus. 9 * out of the OpenFirmware device tree and using it to populate an mii_bus.
10 */ 10 */
11 11
12#include <linux/kernel.h>
13#include <linux/device.h>
14#include <linux/netdevice.h>
15#include <linux/err.h>
12#include <linux/phy.h> 16#include <linux/phy.h>
13#include <linux/of.h> 17#include <linux/of.h>
14#include <linux/of_mdio.h> 18#include <linux/of_mdio.h>
@@ -137,3 +141,41 @@ struct phy_device *of_phy_connect(struct net_device *dev,
137 return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy; 141 return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy;
138} 142}
139EXPORT_SYMBOL(of_phy_connect); 143EXPORT_SYMBOL(of_phy_connect);
144
145/**
146 * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy
147 * @dev: pointer to net_device claiming the phy
148 * @hndlr: Link state callback for the network device
149 * @iface: PHY data interface type
150 *
151 * This function is a temporary stop-gap and will be removed soon. It is
152 * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers. Do
153 * not call this function from new drivers.
154 */
155struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
156 void (*hndlr)(struct net_device *),
157 phy_interface_t iface)
158{
159 struct device_node *net_np;
160 char bus_id[MII_BUS_ID_SIZE + 3];
161 struct phy_device *phy;
162 const u32 *phy_id;
163 int sz;
164
165 if (!dev->dev.parent)
166 return NULL;
167
168 net_np = dev_archdata_get_node(&dev->dev.parent->archdata);
169 if (!net_np)
170 return NULL;
171
172 phy_id = of_get_property(net_np, "fixed-link", &sz);
173 if (!phy_id || sz < sizeof(*phy_id))
174 return NULL;
175
176 sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]);
177
178 phy = phy_connect(dev, bus_id, hndlr, 0, iface);
179 return IS_ERR(phy) ? NULL : phy;
180}
181EXPORT_SYMBOL(of_phy_connect_fixed_link);
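A hedged sketch of how one of the named drivers might call the new of_phy_connect_fixed_link() helper; only the helper's signature and NULL-on-failure behaviour come from the patch above, while the example_adjust_link handler and the interface mode are invented for illustration.

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>

static void example_adjust_link(struct net_device *ndev)
{
	/* react to link-state changes reported by the (fixed) PHY */
}

static int example_attach_phy(struct net_device *ndev)
{
	struct phy_device *phy;

	phy = of_phy_connect_fixed_link(ndev, example_adjust_link,
					PHY_INTERFACE_MODE_MII);
	if (!phy)
		return -ENODEV;	/* no usable fixed-link property */

	return 0;
}
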
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index e1f6ce03705e..3c2270a8300c 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -33,6 +33,7 @@ void oprofile_reset_stats(void)
33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0); 33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); 34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35 atomic_set(&oprofile_stats.event_lost_overflow, 0); 35 atomic_set(&oprofile_stats.event_lost_overflow, 0);
36 atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36} 37}
37 38
38 39
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 5d610cbcfe80..a45b0c0d574e 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -70,7 +70,6 @@
70#undef CCIO_COLLECT_STATS 70#undef CCIO_COLLECT_STATS
71#endif 71#endif
72 72
73#include <linux/proc_fs.h>
74#include <asm/runway.h> /* for proc_runway_root */ 73#include <asm/runway.h> /* for proc_runway_root */
75 74
76#ifdef DEBUG_CCIO_INIT 75#ifdef DEBUG_CCIO_INIT
@@ -1134,7 +1133,7 @@ static const struct file_operations ccio_proc_bitmap_fops = {
1134 .llseek = seq_lseek, 1133 .llseek = seq_lseek,
1135 .release = single_release, 1134 .release = single_release,
1136}; 1135};
1137#endif 1136#endif /* CONFIG_PROC_FS */
1138 1137
1139/** 1138/**
1140 * ccio_find_ioc - Find the ioc in the ioc_list 1139 * ccio_find_ioc - Find the ioc in the ioc_list
@@ -1568,14 +1567,15 @@ static int __init ccio_probe(struct parisc_device *dev)
1568 /* if this fails, no I/O cards will work, so may as well bug */ 1567 /* if this fails, no I/O cards will work, so may as well bug */
1569 BUG_ON(dev->dev.platform_data == NULL); 1568 BUG_ON(dev->dev.platform_data == NULL);
1570 HBA_DATA(dev->dev.platform_data)->iommu = ioc; 1569 HBA_DATA(dev->dev.platform_data)->iommu = ioc;
1571 1570
1571#ifdef CONFIG_PROC_FS
1572 if (ioc_count == 0) { 1572 if (ioc_count == 0) {
1573 proc_create(MODULE_NAME, 0, proc_runway_root, 1573 proc_create(MODULE_NAME, 0, proc_runway_root,
1574 &ccio_proc_info_fops); 1574 &ccio_proc_info_fops);
1575 proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root, 1575 proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
1576 &ccio_proc_bitmap_fops); 1576 &ccio_proc_bitmap_fops);
1577 } 1577 }
1578 1578#endif
1579 ioc_count++; 1579 ioc_count++;
1580 1580
1581 parisc_has_iommu(); 1581 parisc_has_iommu();
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 52ae0b1d470c..d69bde6a2343 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -353,7 +353,7 @@ static unsigned int dino_startup_irq(unsigned int irq)
353 return 0; 353 return 0;
354} 354}
355 355
356static struct hw_interrupt_type dino_interrupt_type = { 356static struct irq_chip dino_interrupt_type = {
357 .typename = "GSC-PCI", 357 .typename = "GSC-PCI",
358 .startup = dino_startup_irq, 358 .startup = dino_startup_irq,
359 .shutdown = dino_disable_irq, 359 .shutdown = dino_disable_irq,
@@ -614,7 +614,7 @@ dino_fixup_bus(struct pci_bus *bus)
614 dev_name(&bus->self->dev), i, 614 dev_name(&bus->self->dev), i,
615 bus->self->resource[i].start, 615 bus->self->resource[i].start,
616 bus->self->resource[i].end); 616 bus->self->resource[i].end);
617 pci_assign_resource(bus->self, i); 617 WARN_ON(pci_assign_resource(bus->self, i));
618 DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", 618 DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
619 dev_name(&bus->self->dev), i, 619 dev_name(&bus->self->dev), i,
620 bus->self->resource[i].start, 620 bus->self->resource[i].start,
@@ -1019,22 +1019,22 @@ static int __init dino_probe(struct parisc_device *dev)
1019 ** It's not used to avoid chicken/egg problems 1019 ** It's not used to avoid chicken/egg problems
1020 ** with configuration accessor functions. 1020 ** with configuration accessor functions.
1021 */ 1021 */
1022 bus = pci_scan_bus_parented(&dev->dev, dino_current_bus, 1022 dino_dev->hba.hba_bus = bus = pci_scan_bus_parented(&dev->dev,
1023 &dino_cfg_ops, NULL); 1023 dino_current_bus, &dino_cfg_ops, NULL);
1024
1024 if(bus) { 1025 if(bus) {
1025 pci_bus_add_devices(bus);
1026 /* This code *depends* on scanning being single threaded 1026 /* This code *depends* on scanning being single threaded
1027 * if it isn't, this global bus number count will fail 1027 * if it isn't, this global bus number count will fail
1028 */ 1028 */
1029 dino_current_bus = bus->subordinate + 1; 1029 dino_current_bus = bus->subordinate + 1;
1030 pci_bus_assign_resources(bus); 1030 pci_bus_assign_resources(bus);
1031 pci_bus_add_devices(bus);
1031 } else { 1032 } else {
1032 printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (probably duplicate bus number %d)\n", 1033 printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n",
1033 dev_name(&dev->dev), dino_current_bus); 1034 dev_name(&dev->dev), dino_current_bus);
1034 /* increment the bus number in case of duplicates */ 1035 /* increment the bus number in case of duplicates */
1035 dino_current_bus++; 1036 dino_current_bus++;
1036 } 1037 }
1037 dino_dev->hba.hba_bus = bus;
1038 return 0; 1038 return 0;
1039} 1039}
1040 1040
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 5b89f404e668..51220749cb65 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -188,7 +188,7 @@ static unsigned int eisa_startup_irq(unsigned int irq)
188 return 0; 188 return 0;
189} 189}
190 190
191static struct hw_interrupt_type eisa_interrupt_type = { 191static struct irq_chip eisa_interrupt_type = {
192 .typename = "EISA", 192 .typename = "EISA",
193 .startup = eisa_startup_irq, 193 .startup = eisa_startup_irq,
194 .shutdown = eisa_disable_irq, 194 .shutdown = eisa_disable_irq,
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 685d94e69d44..8c0b26e9b98a 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -55,7 +55,7 @@ static ssize_t eisa_eeprom_read(struct file * file,
55 ssize_t ret; 55 ssize_t ret;
56 int i; 56 int i;
57 57
58 if (*ppos >= HPEE_MAX_LENGTH) 58 if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH)
59 return 0; 59 return 0;
60 60
61 count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos; 61 count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos;
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index c709ecc2b7f7..0be1d50645ab 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -101,7 +101,7 @@ static int configure_memory(const unsigned char *buf,
101 printk("memory %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end); 101 printk("memory %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
102 result = request_resource(mem_parent, res); 102 result = request_resource(mem_parent, res);
103 if (result < 0) { 103 if (result < 0) {
104 printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); 104 printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
105 return result; 105 return result;
106 } 106 }
107 } 107 }
@@ -191,7 +191,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent,
191 printk("ioports %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end); 191 printk("ioports %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
192 result = request_resource(io_parent, res); 192 result = request_resource(io_parent, res);
193 if (result < 0) { 193 if (result < 0) {
194 printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); 194 printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
195 return result; 195 return result;
196 } 196 }
197 } 197 }
@@ -224,7 +224,7 @@ static int configure_port_init(const unsigned char *buf)
224 case HPEE_PORT_INIT_WIDTH_BYTE: 224 case HPEE_PORT_INIT_WIDTH_BYTE:
225 s=1; 225 s=1;
226 if (c & HPEE_PORT_INIT_MASK) { 226 if (c & HPEE_PORT_INIT_MASK) {
227 printk("\n" KERN_WARNING "port_init: unverified mask attribute\n"); 227 printk(KERN_WARNING "port_init: unverified mask attribute\n");
228 outb((inb(get_16(buf+len+1) & 228 outb((inb(get_16(buf+len+1) &
229 get_8(buf+len+3)) | 229 get_8(buf+len+3)) |
230 get_8(buf+len+4)), get_16(buf+len+1)); 230 get_8(buf+len+4)), get_16(buf+len+1));
@@ -249,7 +249,7 @@ static int configure_port_init(const unsigned char *buf)
249 case HPEE_PORT_INIT_WIDTH_DWORD: 249 case HPEE_PORT_INIT_WIDTH_DWORD:
250 s=4; 250 s=4;
251 if (c & HPEE_PORT_INIT_MASK) { 251 if (c & HPEE_PORT_INIT_MASK) {
252 printk("\n" KERN_WARNING "port_init: unverified mask attribute\n"); 252 printk(KERN_WARNING "port_init: unverified mask attribute\n");
253 outl((inl(get_16(buf+len+1) & 253 outl((inl(get_16(buf+len+1) &
254 get_32(buf+len+3)) | 254 get_32(buf+len+3)) |
255 get_32(buf+len+7)), get_16(buf+len+1)); 255 get_32(buf+len+7)), get_16(buf+len+1));
@@ -259,7 +259,7 @@ static int configure_port_init(const unsigned char *buf)
259 259
260 break; 260 break;
261 default: 261 default:
262 printk("\n" KERN_ERR "Invalid port init word %02x\n", c); 262 printk(KERN_ERR "Invalid port init word %02x\n", c);
263 return 0; 263 return 0;
264 } 264 }
265 265
@@ -297,7 +297,7 @@ static int configure_type_string(const unsigned char *buf)
297 /* just skip past the type field */ 297 /* just skip past the type field */
298 len = get_8(buf); 298 len = get_8(buf);
299 if (len > 80) { 299 if (len > 80) {
300 printk("\n" KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len); 300 printk(KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len);
301 } 301 }
302 302
303 return 1+len; 303 return 1+len;
@@ -398,7 +398,7 @@ static int parse_slot_config(int slot,
398 } 398 }
399 399
400 if (p0 + function_len < pos) { 400 if (p0 + function_len < pos) {
401 printk("\n" KERN_ERR "eisa_enumerator: function %d length mis-match " 401 printk(KERN_ERR "eisa_enumerator: function %d length mis-match "
402 "got %d, expected %d\n", 402 "got %d, expected %d\n",
403 num_func, pos-p0, function_len); 403 num_func, pos-p0, function_len);
404 res=-1; 404 res=-1;
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index d33632917696..647adc9f85ad 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -148,7 +148,7 @@ static unsigned int gsc_asic_startup_irq(unsigned int irq)
148 return 0; 148 return 0;
149} 149}
150 150
151static struct hw_interrupt_type gsc_asic_interrupt_type = { 151static struct irq_chip gsc_asic_interrupt_type = {
152 .typename = "GSC-ASIC", 152 .typename = "GSC-ASIC",
153 .startup = gsc_asic_startup_irq, 153 .startup = gsc_asic_startup_irq,
154 .shutdown = gsc_asic_disable_irq, 154 .shutdown = gsc_asic_disable_irq,
@@ -158,7 +158,7 @@ static struct hw_interrupt_type gsc_asic_interrupt_type = {
158 .end = no_end_irq, 158 .end = no_end_irq,
159}; 159};
160 160
161int gsc_assign_irq(struct hw_interrupt_type *type, void *data) 161int gsc_assign_irq(struct irq_chip *type, void *data)
162{ 162{
163 static int irq = GSC_IRQ_BASE; 163 static int irq = GSC_IRQ_BASE;
164 struct irq_desc *desc; 164 struct irq_desc *desc;
diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h
index 762a1babad60..b9d7bfb68e24 100644
--- a/drivers/parisc/gsc.h
+++ b/drivers/parisc/gsc.h
@@ -38,7 +38,7 @@ struct gsc_asic {
38int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic); 38int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic);
39int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */ 39int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */
40int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */ 40int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */
41int gsc_assign_irq(struct hw_interrupt_type *type, void *data); 41int gsc_assign_irq(struct irq_chip *type, void *data);
42int gsc_find_local_irq(unsigned int irq, int *global_irq, int limit); 42int gsc_find_local_irq(unsigned int irq, int *global_irq, int limit);
43void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl, 43void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl,
44 void (*choose)(struct parisc_device *child, void *ctrl)); 44 void (*choose)(struct parisc_device *child, void *ctrl));
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 13856415b432..815db175d427 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -62,7 +62,8 @@ static int hppb_probe(struct parisc_device *dev)
62 } 62 }
63 card = card->next; 63 card = card->next;
64 } 64 }
65 printk(KERN_INFO "Found GeckoBoa at 0x%x\n", dev->hpa.start); 65 printk(KERN_INFO "Found GeckoBoa at 0x%llx\n",
66 (unsigned long long) dev->hpa.start);
66 67
67 card->hpa = dev->hpa.start; 68 card->hpa = dev->hpa.start;
68 card->mmio_region.name = "HP-PB Bus"; 69 card->mmio_region.name = "HP-PB Bus";
@@ -73,8 +74,10 @@ static int hppb_probe(struct parisc_device *dev)
73 74
74 status = ccio_request_resource(dev, &card->mmio_region); 75 status = ccio_request_resource(dev, &card->mmio_region);
75 if(status < 0) { 76 if(status < 0) {
76 printk(KERN_ERR "%s: failed to claim HP-PB bus space (%08x, %08x)\n", 77 printk(KERN_ERR "%s: failed to claim HP-PB "
77 __FILE__, card->mmio_region.start, card->mmio_region.end); 78 "bus space (0x%08llx, 0x%08llx)\n",
79 __FILE__, (unsigned long long) card->mmio_region.start,
80 (unsigned long long) card->mmio_region.end);
78 } 81 }
79 82
80 return 0; 83 return 0;
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 4a9cc92d4d18..88e333553212 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -729,7 +729,7 @@ static int iosapic_set_affinity_irq(unsigned int irq,
729} 729}
730#endif 730#endif
731 731
732static struct hw_interrupt_type iosapic_interrupt_type = { 732static struct irq_chip iosapic_interrupt_type = {
733 .typename = "IO-SAPIC-level", 733 .typename = "IO-SAPIC-level",
734 .startup = iosapic_startup_irq, 734 .startup = iosapic_startup_irq,
735 .shutdown = iosapic_disable_irq, 735 .shutdown = iosapic_disable_irq,
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 59fbbf128365..3aeb3279c92a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -980,28 +980,38 @@ static void
980lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) 980lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
981{ 981{
982 unsigned long bytecnt; 982 unsigned long bytecnt;
983 pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */
984 pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */
985 long io_count; 983 long io_count;
986 long status; /* PDC return status */ 984 long status; /* PDC return status */
987 long pa_count; 985 long pa_count;
986 pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; /* PA_VIEW */
987 pdc_pat_cell_mod_maddr_block_t *io_pdc_cell; /* IO_VIEW */
988 int i; 988 int i;
989 989
990 pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
991 if (!pa_pdc_cell)
992 return;
993
994 io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
995 if (!io_pdc_cell) {
996 kfree(pa_pdc_cell);
997 return;
998 }
999
990 /* return cell module (IO view) */ 1000 /* return cell module (IO view) */
991 status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index, 1001 status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
992 PA_VIEW, & pa_pdc_cell); 1002 PA_VIEW, pa_pdc_cell);
993 pa_count = pa_pdc_cell.mod[1]; 1003 pa_count = pa_pdc_cell->mod[1];
994 1004
995 status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index, 1005 status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
996 IO_VIEW, &io_pdc_cell); 1006 IO_VIEW, io_pdc_cell);
997 io_count = io_pdc_cell.mod[1]; 1007 io_count = io_pdc_cell->mod[1];
998 1008
999 /* We've already done this once for device discovery...*/ 1009 /* We've already done this once for device discovery...*/
1000 if (status != PDC_OK) { 1010 if (status != PDC_OK) {
1001 panic("pdc_pat_cell_module() call failed for LBA!\n"); 1011 panic("pdc_pat_cell_module() call failed for LBA!\n");
1002 } 1012 }
1003 1013
1004 if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) { 1014 if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
1005 panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n"); 1015 panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
1006 } 1016 }
1007 1017
@@ -1016,8 +1026,8 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1016 } *p, *io; 1026 } *p, *io;
1017 struct resource *r; 1027 struct resource *r;
1018 1028
1019 p = (void *) &(pa_pdc_cell.mod[2+i*3]); 1029 p = (void *) &(pa_pdc_cell->mod[2+i*3]);
1020 io = (void *) &(io_pdc_cell.mod[2+i*3]); 1030 io = (void *) &(io_pdc_cell->mod[2+i*3]);
1021 1031
1022 /* Convert the PAT range data to PCI "struct resource" */ 1032 /* Convert the PAT range data to PCI "struct resource" */
1023 switch(p->type & 0xff) { 1033 switch(p->type & 0xff) {
@@ -1096,6 +1106,9 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1096 break; 1106 break;
1097 } 1107 }
1098 } 1108 }
1109
1110 kfree(pa_pdc_cell);
1111 kfree(io_pdc_cell);
1099} 1112}
1100#else 1113#else
1101/* keep compiler from complaining about missing declarations */ 1114/* keep compiler from complaining about missing declarations */
@@ -1509,10 +1522,6 @@ lba_driver_probe(struct parisc_device *dev)
1509 lba_bus = lba_dev->hba.hba_bus = 1522 lba_bus = lba_dev->hba.hba_bus =
1510 pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start, 1523 pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
1511 cfg_ops, NULL); 1524 cfg_ops, NULL);
1512 if (lba_bus) {
1513 lba_next_bus = lba_bus->subordinate + 1;
1514 pci_bus_add_devices(lba_bus);
1515 }
1516 1525
1517 /* This is in lieu of calling pci_assign_unassigned_resources() */ 1526 /* This is in lieu of calling pci_assign_unassigned_resources() */
1518 if (is_pdc_pat()) { 1527 if (is_pdc_pat()) {
@@ -1533,7 +1542,6 @@ lba_driver_probe(struct parisc_device *dev)
1533 } 1542 }
1534 pci_enable_bridges(lba_bus); 1543 pci_enable_bridges(lba_bus);
1535 1544
1536
1537 /* 1545 /*
1538 ** Once PCI register ops has walked the bus, access to config 1546 ** Once PCI register ops has walked the bus, access to config
1539 ** space is restricted. Avoids master aborts on config cycles. 1547 ** space is restricted. Avoids master aborts on config cycles.
@@ -1543,6 +1551,11 @@ lba_driver_probe(struct parisc_device *dev)
1543 lba_dev->flags |= LBA_FLAG_SKIP_PROBE; 1551 lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
1544 } 1552 }
1545 1553
1554 if (lba_bus) {
1555 lba_next_bus = lba_bus->subordinate + 1;
1556 pci_bus_add_devices(lba_bus);
1557 }
1558
1546 /* Whew! Finally done! Tell services we got this one covered. */ 1559 /* Whew! Finally done! Tell services we got this one covered. */
1547 return 0; 1560 return 0;
1548} 1561}
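The lba_pat_resources() rework above moves two large PDC blocks off the kernel stack. A hedged sketch of that pattern with a stand-in struct; the name and size are arbitrary, the point is the kzalloc()/kfree() on every exit path.

#include <linux/slab.h>

struct big_firmware_block {
	unsigned long mod[508];		/* too large for the kernel stack */
};

static int query_firmware(void)
{
	struct big_firmware_block *blk;
	int ret = 0;

	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* ... fill blk via firmware calls and consume the results ... */

	kfree(blk);
	return ret;
}
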
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index f9f9a5f1bbd0..13a64bc081b6 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -370,7 +370,7 @@ pdcspath_layer_read(struct pdcspath_entry *entry, char *buf)
370 if (!i) /* entry is not ready */ 370 if (!i) /* entry is not ready */
371 return -ENODATA; 371 return -ENODATA;
372 372
373 for (i = 0; devpath->layers[i] && (likely(i < 6)); i++) 373 for (i = 0; i < 6 && devpath->layers[i]; i++)
374 out += sprintf(out, "%u ", devpath->layers[i]); 374 out += sprintf(out, "%u ", devpath->layers[i]);
375 375
376 out += sprintf(out, "\n"); 376 out += sprintf(out, "\n");
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index d46dd57450ac..123d8fe3427d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -2057,6 +2057,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2057 r->start = (base & ~1UL) | PCI_F_EXTEND; 2057 r->start = (base & ~1UL) | PCI_F_EXTEND;
2058 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK); 2058 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
2059 r->end = r->start + size; 2059 r->end = r->start + size;
2060 r->flags = IORESOURCE_MEM;
2060 } 2061 }
2061} 2062}
2062 2063
@@ -2093,4 +2094,5 @@ void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
2093 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC; 2094 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
2094 r->start += rope * (size + 1); /* adjust base for this rope */ 2095 r->start += rope * (size + 1); /* adjust base for this rope */
2095 r->end = r->start + size; 2096 r->end = r->start + size;
2097 r->flags = IORESOURCE_MEM;
2096} 2098}
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 33e5ade774ca..675f04e6597a 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -325,7 +325,7 @@ static unsigned int superio_startup_irq(unsigned int irq)
325 return 0; 325 return 0;
326} 326}
327 327
328static struct hw_interrupt_type superio_interrupt_type = { 328static struct irq_chip superio_interrupt_type = {
329 .typename = SUPERIO, 329 .typename = SUPERIO,
330 .startup = superio_startup_irq, 330 .startup = superio_startup_irq,
331 .shutdown = superio_disable_irq, 331 .shutdown = superio_disable_irq,
@@ -434,8 +434,8 @@ static void __init superio_parport_init(void)
434 0 /*base_hi*/, 434 0 /*base_hi*/,
435 PAR_IRQ, 435 PAR_IRQ,
436 PARPORT_DMA_NONE /* dma */, 436 PARPORT_DMA_NONE /* dma */,
437 NULL /*struct pci_dev* */), 437 NULL /*struct pci_dev* */,
438 0 /* shared irq flags */ ) 438 0 /* shared irq flags */))
439 439
440 printk(KERN_WARNING PFX "Probing parallel port failed.\n"); 440 printk(KERN_WARNING PFX "Probing parallel port failed.\n");
441#endif /* CONFIG_PARPORT_PC */ 441#endif /* CONFIG_PARPORT_PC */
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1032d5fdbd42..2597145a066e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards {
2907 netmos_9755, 2907 netmos_9755,
2908 netmos_9805, 2908 netmos_9805,
2909 netmos_9815, 2909 netmos_9815,
2910 netmos_9901,
2910 quatech_sppxp100, 2911 quatech_sppxp100,
2911}; 2912};
2912 2913
@@ -2987,7 +2988,7 @@ static struct parport_pc_pci {
2987 /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, 2988 /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
2988 /* netmos_9805 */ { 1, { { 0, -1 }, } }, 2989 /* netmos_9805 */ { 1, { { 0, -1 }, } },
2989 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, 2990 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
2990 2991 /* netmos_9901 */ { 1, { { 0, -1 }, } },
2991 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, 2992 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
2992}; 2993};
2993 2994
@@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
3089 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 }, 3090 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
3090 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815, 3091 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
3091 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, 3092 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
3093 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
3094 0xA000, 0x2000, 0, 0, netmos_9901 },
3092 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ 3095 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
3093 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, 3096 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
3094 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, 3097 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
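
Adding a card such as netmos_9901 means keeping three places in step and in the same order: the enum, the per-card layout table indexed by it, and the PCI id table whose driver_data carries the enum value. A self-contained sketch of the enum-indexed-table idiom (hypothetical names, not the parport code):

#include <stdio.h>

/* Illustration only: an enum that indexes a table, as the card list does. */
enum card { card_a, card_b, card_c, card_max };

static const char *card_name[card_max] = {
        [card_a] = "A",
        [card_b] = "B",
        [card_c] = "C",         /* new entry added at the matching index */
};

int main(void)
{
        enum card id = card_c;  /* what driver_data would carry */

        printf("probed card: %s\n", card_name[id]);
        return 0;
}
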
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index a5b9f6ae507b..d703e73fffa7 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -32,7 +32,6 @@
32#include <linux/pci_hotplug.h> 32#include <linux/pci_hotplug.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/smp_lock.h>
36#include <asm/atomic.h> 35#include <asm/atomic.h>
37#include <linux/delay.h> 36#include <linux/delay.h>
38#include <linux/kthread.h> 37#include <linux/kthread.h>
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 2fa47af992a8..0ff689afa757 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -34,7 +34,6 @@
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/wait.h> 36#include <linux/wait.h>
37#include <linux/smp_lock.h>
38#include <linux/pci.h> 37#include <linux/pci.h>
39#include <linux/pci_hotplug.h> 38#include <linux/pci_hotplug.h>
40#include <linux/kthread.h> 39#include <linux/kthread.h>
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 8450f4a6568a..e6089bdb6e5b 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -33,6 +33,7 @@
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/pci_hotplug.h> 35#include <linux/pci_hotplug.h>
36#include <linux/smp_lock.h>
36#include <linux/debugfs.h> 37#include <linux/debugfs.h>
37#include "cpqphp.h" 38#include "cpqphp.h"
38 39
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 844580489d4d..5c5043f239cf 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -555,6 +555,8 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
555 * @slot: pointer to the &struct hotplug_slot to register 555 * @slot: pointer to the &struct hotplug_slot to register
556 * @devnr: device number 556 * @devnr: device number
557 * @name: name registered with kobject core 557 * @name: name registered with kobject core
558 * @owner: caller module owner
559 * @mod_name: caller module name
558 * 560 *
559 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 561 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
560 * userspace interaction to the slot. 562 * userspace interaction to the slot.
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ff4034502d24..8aab8edf123e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -30,7 +30,6 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/smp_lock.h>
34#include <linux/pci.h> 33#include <linux/pci.h>
35#include <linux/workqueue.h> 34#include <linux/workqueue.h>
36#include "../pci.h" 35#include "../pci.h"
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e53eacd75c8d..2314ad7ee5fe 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,7 +39,6 @@
39#include <linux/sysdev.h> 39#include <linux/sysdev.h>
40#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
41#include <asm/iommu.h> 41#include <asm/iommu.h>
42#include <asm/e820.h>
43#include "pci.h" 42#include "pci.h"
44 43
45#define ROOT_SIZE VTD_PAGE_SIZE 44#define ROOT_SIZE VTD_PAGE_SIZE
@@ -57,14 +56,32 @@
57#define MAX_AGAW_WIDTH 64 56#define MAX_AGAW_WIDTH 64
58 57
59#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 58#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
59#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
60 60
61#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 61#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
62#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 62#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
63#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 63#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
64 64
65#ifndef PHYSICAL_PAGE_MASK 65
66#define PHYSICAL_PAGE_MASK PAGE_MASK 66/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
67#endif 67 are never going to work. */
68static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
69{
70 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
71}
72
73static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
74{
75 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
76}
77static inline unsigned long page_to_dma_pfn(struct page *pg)
78{
79 return mm_to_dma_pfn(page_to_pfn(pg));
80}
81static inline unsigned long virt_to_dma_pfn(void *p)
82{
83 return page_to_dma_pfn(virt_to_page(p));
84}
68 85
69/* global iommu list, set NULL for ignored DMAR units */ 86/* global iommu list, set NULL for ignored DMAR units */
70static struct intel_iommu **g_iommus; 87static struct intel_iommu **g_iommus;
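
The new helpers convert between VT-d page frame numbers, which are always 4 KiB units, and the CPU's MM page frame numbers, which may be larger; the shifts assume PAGE_SHIFT >= VTD_PAGE_SHIFT, hence the comment above them. A standalone sketch of the arithmetic, assuming an illustrative 64 KiB MM page size:

#include <stdio.h>

#define VTD_PAGE_SHIFT  12              /* VT-d always uses 4 KiB pages */
#define MM_PAGE_SHIFT   16              /* e.g. a 64 KiB kernel page size */

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (MM_PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (MM_PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
        unsigned long mm_pfn = 5;       /* 5 * 64 KiB into memory */

        /* one MM page covers 16 VT-d pages with these shifts */
        printf("mm pfn %lu -> dma pfn %lu\n", mm_pfn, mm_to_dma_pfn(mm_pfn));
        printf("dma pfn %lu -> mm pfn %lu\n", mm_to_dma_pfn(mm_pfn),
               dma_to_mm_pfn(mm_to_dma_pfn(mm_pfn)));
        return 0;
}
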
@@ -205,12 +222,17 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
205 222
206static inline u64 dma_pte_addr(struct dma_pte *pte) 223static inline u64 dma_pte_addr(struct dma_pte *pte)
207{ 224{
208 return (pte->val & VTD_PAGE_MASK); 225#ifdef CONFIG_64BIT
226 return pte->val & VTD_PAGE_MASK;
227#else
228 /* Must have a full atomic 64-bit read */
229 return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
230#endif
209} 231}
210 232
211static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr) 233static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
212{ 234{
213 pte->val |= (addr & VTD_PAGE_MASK); 235 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
214} 236}
215 237
216static inline bool dma_pte_present(struct dma_pte *pte) 238static inline bool dma_pte_present(struct dma_pte *pte)
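
On 32-bit builds a plain 64-bit load of the PTE could tear, so the read is done with a compare-and-exchange whose old and new values are both zero: a zero entry is harmlessly rewritten as zero, a non-zero entry makes the CAS fail, and either way the current 64-bit contents come back atomically. A user-space sketch of the trick using a GCC __sync builtin (illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Atomically snapshot a 64-bit value: CAS(p, 0, 0) either "replaces" zero
 * with zero (no visible change) or fails and hands back the current value. */
static uint64_t atomic_read64(uint64_t *p)
{
        return __sync_val_compare_and_swap(p, 0ULL, 0ULL);
}

int main(void)
{
        uint64_t pte = 0x123456789abcd003ULL;   /* pretend page-table entry */

        printf("snapshot: %#llx\n", (unsigned long long)atomic_read64(&pte));
        return 0;
}
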
@@ -218,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
218 return (pte->val & 3) != 0; 240 return (pte->val & 3) != 0;
219} 241}
220 242
243static inline int first_pte_in_page(struct dma_pte *pte)
244{
245 return !((unsigned long)pte & ~VTD_PAGE_MASK);
246}
247
221/* 248/*
222 * This domain is a statically identity mapping domain. 249 * This domain is a statically identity mapping domain.
223 * 1. This domain creates a static 1:1 mapping to all usable memory. 250 * 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -245,7 +272,6 @@ struct dmar_domain {
245 struct iova_domain iovad; /* iova's that belong to this domain */ 272 struct iova_domain iovad; /* iova's that belong to this domain */
246 273
247 struct dma_pte *pgd; /* virtual address */ 274 struct dma_pte *pgd; /* virtual address */
248 spinlock_t mapping_lock; /* page table lock */
249 int gaw; /* max guest address width */ 275 int gaw; /* max guest address width */
250 276
251 /* adjusted guest address width, 0 is level 2 30-bit */ 277 /* adjusted guest address width, 0 is level 2 30-bit */
@@ -649,80 +675,78 @@ static inline int width_to_agaw(int width)
649 675
650static inline unsigned int level_to_offset_bits(int level) 676static inline unsigned int level_to_offset_bits(int level)
651{ 677{
652 return (12 + (level - 1) * LEVEL_STRIDE); 678 return (level - 1) * LEVEL_STRIDE;
653} 679}
654 680
655static inline int address_level_offset(u64 addr, int level) 681static inline int pfn_level_offset(unsigned long pfn, int level)
656{ 682{
657 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK); 683 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
658} 684}
659 685
660static inline u64 level_mask(int level) 686static inline unsigned long level_mask(int level)
661{ 687{
662 return ((u64)-1 << level_to_offset_bits(level)); 688 return -1UL << level_to_offset_bits(level);
663} 689}
664 690
665static inline u64 level_size(int level) 691static inline unsigned long level_size(int level)
666{ 692{
667 return ((u64)1 << level_to_offset_bits(level)); 693 return 1UL << level_to_offset_bits(level);
668} 694}
669 695
670static inline u64 align_to_level(u64 addr, int level) 696static inline unsigned long align_to_level(unsigned long pfn, int level)
671{ 697{
672 return ((addr + level_size(level) - 1) & level_mask(level)); 698 return (pfn + level_size(level) - 1) & level_mask(level);
673} 699}
674 700
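
With the level helpers rewritten in pfn space, each level contributes nine index bits of the pfn and the old "+ 12" page-offset term disappears. A standalone check of the index arithmetic (stride and mask as in the VT-d page-table format):

#include <stdio.h>

#define LEVEL_STRIDE    9
#define LEVEL_MASK      ((1 << LEVEL_STRIDE) - 1)       /* 511 */

static unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

int main(void)
{
        unsigned long pfn = 0x12345;    /* some VT-d pfn */
        int level;

        for (level = 1; level <= 4; level++)
                printf("level %d index: %d\n", level, pfn_level_offset(pfn, level));
        return 0;
}
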
675static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) 701static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
702 unsigned long pfn)
676{ 703{
677 int addr_width = agaw_to_width(domain->agaw); 704 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
678 struct dma_pte *parent, *pte = NULL; 705 struct dma_pte *parent, *pte = NULL;
679 int level = agaw_to_level(domain->agaw); 706 int level = agaw_to_level(domain->agaw);
680 int offset; 707 int offset;
681 unsigned long flags;
682 708
683 BUG_ON(!domain->pgd); 709 BUG_ON(!domain->pgd);
684 710 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
685 addr &= (((u64)1) << addr_width) - 1;
686 parent = domain->pgd; 711 parent = domain->pgd;
687 712
688 spin_lock_irqsave(&domain->mapping_lock, flags);
689 while (level > 0) { 713 while (level > 0) {
690 void *tmp_page; 714 void *tmp_page;
691 715
692 offset = address_level_offset(addr, level); 716 offset = pfn_level_offset(pfn, level);
693 pte = &parent[offset]; 717 pte = &parent[offset];
694 if (level == 1) 718 if (level == 1)
695 break; 719 break;
696 720
697 if (!dma_pte_present(pte)) { 721 if (!dma_pte_present(pte)) {
722 uint64_t pteval;
723
698 tmp_page = alloc_pgtable_page(); 724 tmp_page = alloc_pgtable_page();
699 725
700 if (!tmp_page) { 726 if (!tmp_page)
701 spin_unlock_irqrestore(&domain->mapping_lock,
702 flags);
703 return NULL; 727 return NULL;
728
729 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
730 pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
731 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
732 /* Someone else set it while we were thinking; use theirs. */
733 free_pgtable_page(tmp_page);
734 } else {
735 dma_pte_addr(pte);
736 domain_flush_cache(domain, pte, sizeof(*pte));
704 } 737 }
705 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
706 dma_set_pte_addr(pte, virt_to_phys(tmp_page));
707 /*
708 * high level table always sets r/w, last level page
709 * table control read/write
710 */
711 dma_set_pte_readable(pte);
712 dma_set_pte_writable(pte);
713 domain_flush_cache(domain, pte, sizeof(*pte));
714 } 738 }
715 parent = phys_to_virt(dma_pte_addr(pte)); 739 parent = phys_to_virt(dma_pte_addr(pte));
716 level--; 740 level--;
717 } 741 }
718 742
719 spin_unlock_irqrestore(&domain->mapping_lock, flags);
720 return pte; 743 return pte;
721} 744}
722 745
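
The mapping_lock can go away because an empty PTE slot is populated with cmpxchg64: the winner installs its freshly allocated table page, the loser frees its page and walks the entry that is already there. A user-space sketch of the install-or-discard pattern, with invented types and flag bits:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct pte { uint64_t val; };

/* Point an empty PTE at a newly allocated table page, or reuse whatever a
 * racing caller managed to install first. Returns the entry's final value. */
static uint64_t install_table(struct pte *pte)
{
        void *page = calloc(1, 4096);
        uint64_t pteval = (uint64_t)(uintptr_t)page | 0x3;      /* read/write bits */

        if (!__sync_bool_compare_and_swap(&pte->val, 0ULL, pteval))
                free(page);     /* lost the race; keep the existing entry */
        return pte->val;
}

int main(void)
{
        struct pte pte = { 0 };

        printf("first  install: %#llx\n", (unsigned long long)install_table(&pte));
        printf("second install: %#llx\n", (unsigned long long)install_table(&pte));
        free((void *)(uintptr_t)(pte.val & ~0xfULL));
        return 0;
}
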
723/* return address's pte at specific level */ 746/* return address's pte at specific level */
724static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, 747static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
725 int level) 748 unsigned long pfn,
749 int level)
726{ 750{
727 struct dma_pte *parent, *pte = NULL; 751 struct dma_pte *parent, *pte = NULL;
728 int total = agaw_to_level(domain->agaw); 752 int total = agaw_to_level(domain->agaw);
@@ -730,7 +754,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
730 754
731 parent = domain->pgd; 755 parent = domain->pgd;
732 while (level <= total) { 756 while (level <= total) {
733 offset = address_level_offset(addr, total); 757 offset = pfn_level_offset(pfn, total);
734 pte = &parent[offset]; 758 pte = &parent[offset];
735 if (level == total) 759 if (level == total)
736 return pte; 760 return pte;
@@ -743,74 +767,82 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
743 return NULL; 767 return NULL;
744} 768}
745 769
746/* clear one page's page table */
747static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
748{
749 struct dma_pte *pte = NULL;
750
751 /* get last level pte */
752 pte = dma_addr_level_pte(domain, addr, 1);
753
754 if (pte) {
755 dma_clear_pte(pte);
756 domain_flush_cache(domain, pte, sizeof(*pte));
757 }
758}
759
760/* clear last level pte, a tlb flush should be followed */ 770/* clear last level pte, a tlb flush should be followed */
761static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) 771static void dma_pte_clear_range(struct dmar_domain *domain,
772 unsigned long start_pfn,
773 unsigned long last_pfn)
762{ 774{
763 int addr_width = agaw_to_width(domain->agaw); 775 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
764 int npages; 776 struct dma_pte *first_pte, *pte;
765 777
766 start &= (((u64)1) << addr_width) - 1; 778 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
767 end &= (((u64)1) << addr_width) - 1; 779 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
768 /* in case it's partial page */
769 start &= PAGE_MASK;
770 end = PAGE_ALIGN(end);
771 npages = (end - start) / VTD_PAGE_SIZE;
772 780
773 /* we don't need lock here, nobody else touches the iova range */ 781 /* we don't need lock here; nobody else touches the iova range */
774 while (npages--) { 782 while (start_pfn <= last_pfn) {
775 dma_pte_clear_one(domain, start); 783 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
776 start += VTD_PAGE_SIZE; 784 if (!pte) {
785 start_pfn = align_to_level(start_pfn + 1, 2);
786 continue;
787 }
788 do {
789 dma_clear_pte(pte);
790 start_pfn++;
791 pte++;
792 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
793
794 domain_flush_cache(domain, first_pte,
795 (void *)pte - (void *)first_pte);
777 } 796 }
778} 797}
779 798
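
Rather than looking up and cache-flushing one PTE at a time, the new loops clear a contiguous run within a single page-table page and flush it once, using first_pte_in_page() to notice when the walk crosses into the next table page. A user-space sketch that counts how many flushes the batching needs (names and sizes are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PTES_PER_PAGE   512

static uint64_t table[2][PTES_PER_PAGE];        /* two last-level table pages */
static int flushes;

static void flush_range(void *start, size_t len)
{
        flushes++;              /* stand-in for flushing the dirtied PTEs */
        (void)start; (void)len;
}

int main(void)
{
        unsigned long start = 300, last = 700;  /* pfns spanning both pages */

        while (start <= last) {
                unsigned long idx = start % PTES_PER_PAGE;
                uint64_t *first = &table[start / PTES_PER_PAGE][idx];
                uint64_t *pte = first;

                do {
                        *pte++ = 0;
                        start++;
                } while (start <= last && (start % PTES_PER_PAGE) != 0);

                flush_range(first, (char *)pte - (char *)first);
        }
        printf("cleared with %d flushes instead of %lu\n", flushes, 700UL - 300UL + 1);
        return 0;
}
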
780/* free page table pages. last level pte should already be cleared */ 799/* free page table pages. last level pte should already be cleared */
781static void dma_pte_free_pagetable(struct dmar_domain *domain, 800static void dma_pte_free_pagetable(struct dmar_domain *domain,
782 u64 start, u64 end) 801 unsigned long start_pfn,
802 unsigned long last_pfn)
783{ 803{
784 int addr_width = agaw_to_width(domain->agaw); 804 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
785 struct dma_pte *pte; 805 struct dma_pte *first_pte, *pte;
786 int total = agaw_to_level(domain->agaw); 806 int total = agaw_to_level(domain->agaw);
787 int level; 807 int level;
788 u64 tmp; 808 unsigned long tmp;
789 809
790 start &= (((u64)1) << addr_width) - 1; 810 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
791 end &= (((u64)1) << addr_width) - 1; 811 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
792 812
793 /* we don't need lock here, nobody else touches the iova range */ 813 /* We don't need lock here; nobody else touches the iova range */
794 level = 2; 814 level = 2;
795 while (level <= total) { 815 while (level <= total) {
796 tmp = align_to_level(start, level); 816 tmp = align_to_level(start_pfn, level);
797 if (tmp >= end || (tmp + level_size(level) > end)) 817
818 /* If we can't even clear one PTE at this level, we're done */
819 if (tmp + level_size(level) - 1 > last_pfn)
798 return; 820 return;
799 821
800 while (tmp < end) { 822 while (tmp + level_size(level) - 1 <= last_pfn) {
801 pte = dma_addr_level_pte(domain, tmp, level); 823 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
802 if (pte) { 824 if (!pte) {
803 free_pgtable_page( 825 tmp = align_to_level(tmp + 1, level + 1);
804 phys_to_virt(dma_pte_addr(pte))); 826 continue;
805 dma_clear_pte(pte);
806 domain_flush_cache(domain, pte, sizeof(*pte));
807 } 827 }
808 tmp += level_size(level); 828 do {
829 if (dma_pte_present(pte)) {
830 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
831 dma_clear_pte(pte);
832 }
833 pte++;
834 tmp += level_size(level);
835 } while (!first_pte_in_page(pte) &&
836 tmp + level_size(level) - 1 <= last_pfn);
837
838 domain_flush_cache(domain, first_pte,
839 (void *)pte - (void *)first_pte);
840
809 } 841 }
810 level++; 842 level++;
811 } 843 }
812 /* free pgd */ 844 /* free pgd */
813 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) { 845 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
814 free_pgtable_page(domain->pgd); 846 free_pgtable_page(domain->pgd);
815 domain->pgd = NULL; 847 domain->pgd = NULL;
816 } 848 }
@@ -1036,11 +1068,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1036} 1068}
1037 1069
1038static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 1070static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1039 u64 addr, unsigned int pages) 1071 unsigned long pfn, unsigned int pages)
1040{ 1072{
1041 unsigned int mask = ilog2(__roundup_pow_of_two(pages)); 1073 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1074 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1042 1075
1043 BUG_ON(addr & (~VTD_PAGE_MASK));
1044 BUG_ON(pages == 0); 1076 BUG_ON(pages == 0);
1045 1077
1046 /* 1078 /*
@@ -1055,7 +1087,12 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1055 else 1087 else
1056 iommu->flush.flush_iotlb(iommu, did, addr, mask, 1088 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1057 DMA_TLB_PSI_FLUSH); 1089 DMA_TLB_PSI_FLUSH);
1058 if (did) 1090
1091 /*
1092 * In caching mode, domain ID 0 is reserved for non-present to present
1093 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1094 */
1095 if (!cap_caching_mode(iommu->cap) || did)
1059 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); 1096 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1060} 1097}
1061 1098
@@ -1280,7 +1317,6 @@ static void dmar_init_reserved_ranges(void)
1280 struct pci_dev *pdev = NULL; 1317 struct pci_dev *pdev = NULL;
1281 struct iova *iova; 1318 struct iova *iova;
1282 int i; 1319 int i;
1283 u64 addr, size;
1284 1320
1285 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); 1321 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1286 1322
@@ -1303,12 +1339,9 @@ static void dmar_init_reserved_ranges(void)
1303 r = &pdev->resource[i]; 1339 r = &pdev->resource[i];
1304 if (!r->flags || !(r->flags & IORESOURCE_MEM)) 1340 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1305 continue; 1341 continue;
1306 addr = r->start; 1342 iova = reserve_iova(&reserved_iova_list,
1307 addr &= PHYSICAL_PAGE_MASK; 1343 IOVA_PFN(r->start),
1308 size = r->end - addr; 1344 IOVA_PFN(r->end));
1309 size = PAGE_ALIGN(size);
1310 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1311 IOVA_PFN(size + addr) - 1);
1312 if (!iova) 1345 if (!iova)
1313 printk(KERN_ERR "Reserve iova failed\n"); 1346 printk(KERN_ERR "Reserve iova failed\n");
1314 } 1347 }
@@ -1342,7 +1375,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1342 unsigned long sagaw; 1375 unsigned long sagaw;
1343 1376
1344 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 1377 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1345 spin_lock_init(&domain->mapping_lock);
1346 spin_lock_init(&domain->iommu_lock); 1378 spin_lock_init(&domain->iommu_lock);
1347 1379
1348 domain_reserve_special_ranges(domain); 1380 domain_reserve_special_ranges(domain);
@@ -1389,7 +1421,6 @@ static void domain_exit(struct dmar_domain *domain)
1389{ 1421{
1390 struct dmar_drhd_unit *drhd; 1422 struct dmar_drhd_unit *drhd;
1391 struct intel_iommu *iommu; 1423 struct intel_iommu *iommu;
1392 u64 end;
1393 1424
1394 /* Domain 0 is reserved, so don't process it */ 1425 /* Domain 0 is reserved, so don't process it */
1395 if (!domain) 1426 if (!domain)
@@ -1398,14 +1429,12 @@ static void domain_exit(struct dmar_domain *domain)
1398 domain_remove_dev_info(domain); 1429 domain_remove_dev_info(domain);
1399 /* destroy iovas */ 1430 /* destroy iovas */
1400 put_iova_domain(&domain->iovad); 1431 put_iova_domain(&domain->iovad);
1401 end = DOMAIN_MAX_ADDR(domain->gaw);
1402 end = end & (~PAGE_MASK);
1403 1432
1404 /* clear ptes */ 1433 /* clear ptes */
1405 dma_pte_clear_range(domain, 0, end); 1434 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1406 1435
1407 /* free page tables */ 1436 /* free page tables */
1408 dma_pte_free_pagetable(domain, 0, end); 1437 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1409 1438
1410 for_each_active_iommu(iommu, drhd) 1439 for_each_active_iommu(iommu, drhd)
1411 if (test_bit(iommu->seq_id, &domain->iommu_bmp)) 1440 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
@@ -1476,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1476 } 1505 }
1477 1506
1478 set_bit(num, iommu->domain_ids); 1507 set_bit(num, iommu->domain_ids);
1479 set_bit(iommu->seq_id, &domain->iommu_bmp);
1480 iommu->domains[num] = domain; 1508 iommu->domains[num] = domain;
1481 id = num; 1509 id = num;
1482 } 1510 }
@@ -1619,42 +1647,94 @@ static int domain_context_mapped(struct pci_dev *pdev)
1619 tmp->devfn); 1647 tmp->devfn);
1620} 1648}
1621 1649
1622static int 1650/* Returns a number of VTD pages, but aligned to MM page size */
1623domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, 1651static inline unsigned long aligned_nrpages(unsigned long host_addr,
1624 u64 hpa, size_t size, int prot) 1652 size_t size)
1625{ 1653{
1626 u64 start_pfn, end_pfn; 1654 host_addr &= ~PAGE_MASK;
1627 struct dma_pte *pte; 1655 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1628 int index; 1656}
1629 int addr_width = agaw_to_width(domain->agaw);
1630 1657
1631 hpa &= (((u64)1) << addr_width) - 1; 1658static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1659 struct scatterlist *sg, unsigned long phys_pfn,
1660 unsigned long nr_pages, int prot)
1661{
1662 struct dma_pte *first_pte = NULL, *pte = NULL;
1663 phys_addr_t uninitialized_var(pteval);
1664 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1665 unsigned long sg_res;
1666
1667 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1632 1668
1633 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) 1669 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1634 return -EINVAL; 1670 return -EINVAL;
1635 iova &= PAGE_MASK; 1671
1636 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; 1672 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1637 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; 1673
1638 index = 0; 1674 if (sg)
1639 while (start_pfn < end_pfn) { 1675 sg_res = 0;
1640 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); 1676 else {
1641 if (!pte) 1677 sg_res = nr_pages + 1;
1642 return -ENOMEM; 1678 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1679 }
1680
1681 while (nr_pages--) {
1682 uint64_t tmp;
1683
1684 if (!sg_res) {
1685 sg_res = aligned_nrpages(sg->offset, sg->length);
1686 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1687 sg->dma_length = sg->length;
1688 pteval = page_to_phys(sg_page(sg)) | prot;
1689 }
1690 if (!pte) {
1691 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1692 if (!pte)
1693 return -ENOMEM;
1694 }
1643 /* We don't need lock here, nobody else 1695 /* We don't need lock here, nobody else
1644 * touches the iova range 1696 * touches the iova range
1645 */ 1697 */
1646 BUG_ON(dma_pte_addr(pte)); 1698 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1647 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); 1699 if (tmp) {
1648 dma_set_pte_prot(pte, prot); 1700 static int dumps = 5;
1649 if (prot & DMA_PTE_SNP) 1701 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1650 dma_set_pte_snp(pte); 1702 iov_pfn, tmp, (unsigned long long)pteval);
1651 domain_flush_cache(domain, pte, sizeof(*pte)); 1703 if (dumps) {
1652 start_pfn++; 1704 dumps--;
1653 index++; 1705 debug_dma_dump_mappings(NULL);
1706 }
1707 WARN_ON(1);
1708 }
1709 pte++;
1710 if (!nr_pages || first_pte_in_page(pte)) {
1711 domain_flush_cache(domain, first_pte,
1712 (void *)pte - (void *)first_pte);
1713 pte = NULL;
1714 }
1715 iov_pfn++;
1716 pteval += VTD_PAGE_SIZE;
1717 sg_res--;
1718 if (!sg_res)
1719 sg = sg_next(sg);
1654 } 1720 }
1655 return 0; 1721 return 0;
1656} 1722}
1657 1723
1724static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1725 struct scatterlist *sg, unsigned long nr_pages,
1726 int prot)
1727{
1728 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1729}
1730
1731static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1732 unsigned long phys_pfn, unsigned long nr_pages,
1733 int prot)
1734{
1735 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1736}
1737
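
aligned_nrpages() turns a buffer's offset within its first MM page plus its length into a count of VT-d pages rounded up to whole MM pages, which is the granularity the IOVA allocator works in. A quick standalone check of that arithmetic, assuming 4 KiB MM pages:

#include <stdio.h>
#include <stddef.h>

#define VTD_PAGE_SHIFT  12
#define MM_PAGE_SIZE    4096UL
#define MM_PAGE_MASK    (~(MM_PAGE_SIZE - 1))
#define MM_PAGE_ALIGN(x) (((x) + MM_PAGE_SIZE - 1) & MM_PAGE_MASK)

static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
        host_addr &= ~MM_PAGE_MASK;     /* keep only the offset inside the page */
        return MM_PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
        /* an 8 KiB buffer starting 0x800 into its first page needs 3 pages */
        printf("%lu VT-d pages\n", aligned_nrpages(0x12345800UL, 0x2000));
        return 0;
}
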
1658static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) 1738static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1659{ 1739{
1660 if (!iommu) 1740 if (!iommu)
@@ -1845,58 +1925,61 @@ error:
1845 1925
1846static int iommu_identity_mapping; 1926static int iommu_identity_mapping;
1847 1927
1928static int iommu_domain_identity_map(struct dmar_domain *domain,
1929 unsigned long long start,
1930 unsigned long long end)
1931{
1932 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1933 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1934
1935 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1936 dma_to_mm_pfn(last_vpfn))) {
1937 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1938 return -ENOMEM;
1939 }
1940
1941 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1942 start, end, domain->id);
1943 /*
1944 * RMRR range might have overlap with physical memory range,
1945 * clear it first
1946 */
1947 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1948
1949 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1950 last_vpfn - first_vpfn + 1,
1951 DMA_PTE_READ|DMA_PTE_WRITE);
1952}
1953
1848static int iommu_prepare_identity_map(struct pci_dev *pdev, 1954static int iommu_prepare_identity_map(struct pci_dev *pdev,
1849 unsigned long long start, 1955 unsigned long long start,
1850 unsigned long long end) 1956 unsigned long long end)
1851{ 1957{
1852 struct dmar_domain *domain; 1958 struct dmar_domain *domain;
1853 unsigned long size;
1854 unsigned long long base;
1855 int ret; 1959 int ret;
1856 1960
1857 printk(KERN_INFO 1961 printk(KERN_INFO
1858 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 1962 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1859 pci_name(pdev), start, end); 1963 pci_name(pdev), start, end);
1860 if (iommu_identity_mapping) 1964
1861 domain = si_domain; 1965 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1862 else
1863 /* page table init */
1864 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1865 if (!domain) 1966 if (!domain)
1866 return -ENOMEM; 1967 return -ENOMEM;
1867 1968
1868 /* The address might not be aligned */ 1969 ret = iommu_domain_identity_map(domain, start, end);
1869 base = start & PAGE_MASK;
1870 size = end - base;
1871 size = PAGE_ALIGN(size);
1872 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1873 IOVA_PFN(base + size) - 1)) {
1874 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1875 ret = -ENOMEM;
1876 goto error;
1877 }
1878
1879 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1880 size, base, pci_name(pdev));
1881 /*
1882 * RMRR range might have overlap with physical memory range,
1883 * clear it first
1884 */
1885 dma_pte_clear_range(domain, base, base + size);
1886
1887 ret = domain_page_mapping(domain, base, base, size,
1888 DMA_PTE_READ|DMA_PTE_WRITE);
1889 if (ret) 1970 if (ret)
1890 goto error; 1971 goto error;
1891 1972
1892 /* context entry init */ 1973 /* context entry init */
1893 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); 1974 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1894 if (!ret) 1975 if (ret)
1895 return 0; 1976 goto error;
1896error: 1977
1978 return 0;
1979
1980 error:
1897 domain_exit(domain); 1981 domain_exit(domain);
1898 return ret; 1982 return ret;
1899
1900} 1983}
1901 1984
1902static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, 1985static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
@@ -1908,64 +1991,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1908 rmrr->end_address + 1); 1991 rmrr->end_address + 1);
1909} 1992}
1910 1993
1911#ifdef CONFIG_DMAR_GFX_WA
1912struct iommu_prepare_data {
1913 struct pci_dev *pdev;
1914 int ret;
1915};
1916
1917static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1918 unsigned long end_pfn, void *datax)
1919{
1920 struct iommu_prepare_data *data;
1921
1922 data = (struct iommu_prepare_data *)datax;
1923
1924 data->ret = iommu_prepare_identity_map(data->pdev,
1925 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1926 return data->ret;
1927
1928}
1929
1930static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1931{
1932 int nid;
1933 struct iommu_prepare_data data;
1934
1935 data.pdev = pdev;
1936 data.ret = 0;
1937
1938 for_each_online_node(nid) {
1939 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1940 if (data.ret)
1941 return data.ret;
1942 }
1943 return data.ret;
1944}
1945
1946static void __init iommu_prepare_gfx_mapping(void)
1947{
1948 struct pci_dev *pdev = NULL;
1949 int ret;
1950
1951 for_each_pci_dev(pdev) {
1952 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
1953 !IS_GFX_DEVICE(pdev))
1954 continue;
1955 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1956 pci_name(pdev));
1957 ret = iommu_prepare_with_active_regions(pdev);
1958 if (ret)
1959 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1960 }
1961}
1962#else /* !CONFIG_DMAR_GFX_WA */
1963static inline void iommu_prepare_gfx_mapping(void)
1964{
1965 return;
1966}
1967#endif
1968
1969#ifdef CONFIG_DMAR_FLOPPY_WA 1994#ifdef CONFIG_DMAR_FLOPPY_WA
1970static inline void iommu_prepare_isa(void) 1995static inline void iommu_prepare_isa(void)
1971{ 1996{
@@ -1976,12 +2001,12 @@ static inline void iommu_prepare_isa(void)
1976 if (!pdev) 2001 if (!pdev)
1977 return; 2002 return;
1978 2003
1979 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n"); 2004 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
1980 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); 2005 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1981 2006
1982 if (ret) 2007 if (ret)
1983 printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, " 2008 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1984 "floppy might not work\n"); 2009 "floppy might not work\n");
1985 2010
1986} 2011}
1987#else 2012#else
@@ -2009,16 +2034,30 @@ static int __init init_context_pass_through(void)
2009} 2034}
2010 2035
2011static int md_domain_init(struct dmar_domain *domain, int guest_width); 2036static int md_domain_init(struct dmar_domain *domain, int guest_width);
2037
2038static int __init si_domain_work_fn(unsigned long start_pfn,
2039 unsigned long end_pfn, void *datax)
2040{
2041 int *ret = datax;
2042
2043 *ret = iommu_domain_identity_map(si_domain,
2044 (uint64_t)start_pfn << PAGE_SHIFT,
2045 (uint64_t)end_pfn << PAGE_SHIFT);
2046 return *ret;
2047
2048}
2049
2012static int si_domain_init(void) 2050static int si_domain_init(void)
2013{ 2051{
2014 struct dmar_drhd_unit *drhd; 2052 struct dmar_drhd_unit *drhd;
2015 struct intel_iommu *iommu; 2053 struct intel_iommu *iommu;
2016 int ret = 0; 2054 int nid, ret = 0;
2017 2055
2018 si_domain = alloc_domain(); 2056 si_domain = alloc_domain();
2019 if (!si_domain) 2057 if (!si_domain)
2020 return -EFAULT; 2058 return -EFAULT;
2021 2059
2060 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2022 2061
2023 for_each_active_iommu(iommu, drhd) { 2062 for_each_active_iommu(iommu, drhd) {
2024 ret = iommu_attach_domain(si_domain, iommu); 2063 ret = iommu_attach_domain(si_domain, iommu);
@@ -2035,6 +2074,12 @@ static int si_domain_init(void)
2035 2074
2036 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; 2075 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2037 2076
2077 for_each_online_node(nid) {
2078 work_with_active_regions(nid, si_domain_work_fn, &ret);
2079 if (ret)
2080 return ret;
2081 }
2082
2038 return 0; 2083 return 0;
2039} 2084}
2040 2085
@@ -2079,9 +2124,49 @@ static int domain_add_dev_info(struct dmar_domain *domain,
2079 return 0; 2124 return 0;
2080} 2125}
2081 2126
2127static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2128{
2129 if (iommu_identity_mapping == 2)
2130 return IS_GFX_DEVICE(pdev);
2131
2132 /*
2133 * We want to start off with all devices in the 1:1 domain, and
2134 * take them out later if we find they can't access all of memory.
2135 *
2136 * However, we can't do this for PCI devices behind bridges,
2137 * because all PCI devices behind the same bridge will end up
2138 * with the same source-id on their transactions.
2139 *
2140 * Practically speaking, we can't change things around for these
2141 * devices at run-time, because we can't be sure there'll be no
2142 * DMA transactions in flight for any of their siblings.
2143 *
2144 * So PCI devices (unless they're on the root bus) as well as
2145 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2146 * the 1:1 domain, just in _case_ one of their siblings turns out
2147 * not to be able to map all of memory.
2148 */
2149 if (!pdev->is_pcie) {
2150 if (!pci_is_root_bus(pdev->bus))
2151 return 0;
2152 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2153 return 0;
2154 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2155 return 0;
2156
2157 /*
2158 * At boot time, we don't yet know if devices will be 64-bit capable.
2159 * Assume that they will -- if they turn out not to be, then we can
2160 * take them out of the 1:1 domain later.
2161 */
2162 if (!startup)
2163 return pdev->dma_mask > DMA_BIT_MASK(32);
2164
2165 return 1;
2166}
2167
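
The decision reduces to: in "graphics only" mode (iommu_identity_mapping == 2) identity-map just GFX devices; never place conventional-PCI devices behind a bridge, or such bridges themselves, in the 1:1 domain, because siblings share a source-id; and once past startup, keep only devices whose DMA mask reaches above 32 bits. A hedged user-space restatement of that predicate, with invented field names:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct dev {
        bool is_pcie, on_root_bus, is_pci_bridge, is_pcie_to_pci_bridge, is_gfx;
        uint64_t dma_mask;
};

static int should_identity_map(const struct dev *d, int gfx_only, int startup)
{
        if (gfx_only)
                return d->is_gfx;
        if (!d->is_pcie) {
                if (!d->on_root_bus || d->is_pci_bridge)
                        return 0;
        } else if (d->is_pcie_to_pci_bridge) {
                return 0;
        }
        if (!startup)
                return d->dma_mask > 0xffffffffULL;
        return 1;
}

int main(void)
{
        struct dev legacy_behind_bridge = { .is_pcie = false, .on_root_bus = false };
        struct dev pcie_64bit = { .is_pcie = true, .dma_mask = ~0ULL };

        printf("legacy behind bridge: %d\n", should_identity_map(&legacy_behind_bridge, 0, 1));
        printf("pcie, 64-bit, runtime: %d\n", should_identity_map(&pcie_64bit, 0, 0));
        return 0;
}
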
2082static int iommu_prepare_static_identity_mapping(void) 2168static int iommu_prepare_static_identity_mapping(void)
2083{ 2169{
2084 int i;
2085 struct pci_dev *pdev = NULL; 2170 struct pci_dev *pdev = NULL;
2086 int ret; 2171 int ret;
2087 2172
@@ -2089,23 +2174,19 @@ static int iommu_prepare_static_identity_mapping(void)
2089 if (ret) 2174 if (ret)
2090 return -EFAULT; 2175 return -EFAULT;
2091 2176
2092 printk(KERN_INFO "IOMMU: Setting identity map:\n");
2093 for_each_pci_dev(pdev) { 2177 for_each_pci_dev(pdev) {
2094 for (i = 0; i < e820.nr_map; i++) { 2178 if (iommu_should_identity_map(pdev, 1)) {
2095 struct e820entry *ei = &e820.map[i]; 2179 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2096 2180 pci_name(pdev));
2097 if (ei->type == E820_RAM) { 2181
2098 ret = iommu_prepare_identity_map(pdev, 2182 ret = domain_context_mapping(si_domain, pdev,
2099 ei->addr, ei->addr + ei->size); 2183 CONTEXT_TT_MULTI_LEVEL);
2100 if (ret) { 2184 if (ret)
2101 printk(KERN_INFO "1:1 mapping to one domain failed.\n"); 2185 return ret;
2102 return -EFAULT; 2186 ret = domain_add_dev_info(si_domain, pdev);
2103 } 2187 if (ret)
2104 } 2188 return ret;
2105 } 2189 }
2106 ret = domain_add_dev_info(si_domain, pdev);
2107 if (ret)
2108 return ret;
2109 } 2190 }
2110 2191
2111 return 0; 2192 return 0;
@@ -2260,6 +2341,10 @@ int __init init_dmars(void)
2260 * identity mapping if iommu_identity_mapping is set. 2341 * identity mapping if iommu_identity_mapping is set.
2261 */ 2342 */
2262 if (!iommu_pass_through) { 2343 if (!iommu_pass_through) {
2344#ifdef CONFIG_DMAR_BROKEN_GFX_WA
2345 if (!iommu_identity_mapping)
2346 iommu_identity_mapping = 2;
2347#endif
2263 if (iommu_identity_mapping) 2348 if (iommu_identity_mapping)
2264 iommu_prepare_static_identity_mapping(); 2349 iommu_prepare_static_identity_mapping();
2265 /* 2350 /*
@@ -2293,8 +2378,6 @@ int __init init_dmars(void)
2293 } 2378 }
2294 } 2379 }
2295 2380
2296 iommu_prepare_gfx_mapping();
2297
2298 iommu_prepare_isa(); 2381 iommu_prepare_isa();
2299 } 2382 }
2300 2383
@@ -2339,50 +2422,32 @@ error:
2339 return ret; 2422 return ret;
2340} 2423}
2341 2424
2342static inline u64 aligned_size(u64 host_addr, size_t size) 2425/* This takes a number of _MM_ pages, not VTD pages */
2343{ 2426static struct iova *intel_alloc_iova(struct device *dev,
2344 u64 addr; 2427 struct dmar_domain *domain,
2345 addr = (host_addr & (~PAGE_MASK)) + size; 2428 unsigned long nrpages, uint64_t dma_mask)
2346 return PAGE_ALIGN(addr);
2347}
2348
2349struct iova *
2350iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
2351{
2352 struct iova *piova;
2353
2354 /* Make sure it's in range */
2355 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
2356 if (!size || (IOVA_START_ADDR + size > end))
2357 return NULL;
2358
2359 piova = alloc_iova(&domain->iovad,
2360 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
2361 return piova;
2362}
2363
2364static struct iova *
2365__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
2366 size_t size, u64 dma_mask)
2367{ 2429{
2368 struct pci_dev *pdev = to_pci_dev(dev); 2430 struct pci_dev *pdev = to_pci_dev(dev);
2369 struct iova *iova = NULL; 2431 struct iova *iova = NULL;
2370 2432
2371 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac) 2433 /* Restrict dma_mask to the width that the iommu can handle */
2372 iova = iommu_alloc_iova(domain, size, dma_mask); 2434 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2373 else { 2435
2436 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2374 /* 2437 /*
2375 * First try to allocate an io virtual address in 2438 * First try to allocate an io virtual address in
2376 * DMA_BIT_MASK(32) and if that fails then try allocating 2439 * DMA_BIT_MASK(32) and if that fails then try allocating
2377 * from higher range 2440 * from higher range
2378 */ 2441 */
2379 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32)); 2442 iova = alloc_iova(&domain->iovad, nrpages,
2380 if (!iova) 2443 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2381 iova = iommu_alloc_iova(domain, size, dma_mask); 2444 if (iova)
2382 } 2445 return iova;
2383 2446 }
2384 if (!iova) { 2447 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2385 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev)); 2448 if (unlikely(!iova)) {
2449 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2450 nrpages, pci_name(pdev));
2386 return NULL; 2451 return NULL;
2387 } 2452 }
2388 2453
@@ -2424,16 +2489,24 @@ static int iommu_dummy(struct pci_dev *pdev)
2424} 2489}
2425 2490
2426/* Check if the pdev needs to go through non-identity map and unmap process.*/ 2491/* Check if the pdev needs to go through non-identity map and unmap process.*/
2427static int iommu_no_mapping(struct pci_dev *pdev) 2492static int iommu_no_mapping(struct device *dev)
2428{ 2493{
2494 struct pci_dev *pdev;
2429 int found; 2495 int found;
2430 2496
2497 if (unlikely(dev->bus != &pci_bus_type))
2498 return 1;
2499
2500 pdev = to_pci_dev(dev);
2501 if (iommu_dummy(pdev))
2502 return 1;
2503
2431 if (!iommu_identity_mapping) 2504 if (!iommu_identity_mapping)
2432 return iommu_dummy(pdev); 2505 return 0;
2433 2506
2434 found = identity_mapping(pdev); 2507 found = identity_mapping(pdev);
2435 if (found) { 2508 if (found) {
2436 if (pdev->dma_mask > DMA_BIT_MASK(32)) 2509 if (iommu_should_identity_map(pdev, 0))
2437 return 1; 2510 return 1;
2438 else { 2511 else {
2439 /* 2512 /*
@@ -2450,9 +2523,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
2450 * In case of a detached 64 bit DMA device from vm, the device 2523 * In case of a detached 64 bit DMA device from vm, the device
2451 * is put into si_domain for identity mapping. 2524 * is put into si_domain for identity mapping.
2452 */ 2525 */
2453 if (pdev->dma_mask > DMA_BIT_MASK(32)) { 2526 if (iommu_should_identity_map(pdev, 0)) {
2454 int ret; 2527 int ret;
2455 ret = domain_add_dev_info(si_domain, pdev); 2528 ret = domain_add_dev_info(si_domain, pdev);
2529 if (ret)
2530 return 0;
2531 ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2456 if (!ret) { 2532 if (!ret) {
2457 printk(KERN_INFO "64bit %s uses identity mapping\n", 2533 printk(KERN_INFO "64bit %s uses identity mapping\n",
2458 pci_name(pdev)); 2534 pci_name(pdev));
@@ -2461,7 +2537,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
2461 } 2537 }
2462 } 2538 }
2463 2539
2464 return iommu_dummy(pdev); 2540 return 0;
2465} 2541}
2466 2542
2467static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, 2543static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2474,10 +2550,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2474 int prot = 0; 2550 int prot = 0;
2475 int ret; 2551 int ret;
2476 struct intel_iommu *iommu; 2552 struct intel_iommu *iommu;
2553 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2477 2554
2478 BUG_ON(dir == DMA_NONE); 2555 BUG_ON(dir == DMA_NONE);
2479 2556
2480 if (iommu_no_mapping(pdev)) 2557 if (iommu_no_mapping(hwdev))
2481 return paddr; 2558 return paddr;
2482 2559
2483 domain = get_valid_domain_for_dev(pdev); 2560 domain = get_valid_domain_for_dev(pdev);
@@ -2485,14 +2562,13 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2485 return 0; 2562 return 0;
2486 2563
2487 iommu = domain_get_iommu(domain); 2564 iommu = domain_get_iommu(domain);
2488 size = aligned_size((u64)paddr, size); 2565 size = aligned_nrpages(paddr, size);
2489 2566
2490 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); 2567 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2568 pdev->dma_mask);
2491 if (!iova) 2569 if (!iova)
2492 goto error; 2570 goto error;
2493 2571
2494 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2495
2496 /* 2572 /*
2497 * Check if DMAR supports zero-length reads on write only 2573 * Check if DMAR supports zero-length reads on write only
2498 * mappings.. 2574 * mappings..
@@ -2508,20 +2584,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2508 * might have two guest_addr mapping to the same host paddr, but this 2584 * might have two guest_addr mapping to the same host paddr, but this
2509 * is not a big problem 2585 * is not a big problem
2510 */ 2586 */
2511 ret = domain_page_mapping(domain, start_paddr, 2587 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2512 ((u64)paddr) & PHYSICAL_PAGE_MASK, 2588 mm_to_dma_pfn(paddr_pfn), size, prot);
2513 size, prot);
2514 if (ret) 2589 if (ret)
2515 goto error; 2590 goto error;
2516 2591
2517 /* it's a non-present to present mapping. Only flush if caching mode */ 2592 /* it's a non-present to present mapping. Only flush if caching mode */
2518 if (cap_caching_mode(iommu->cap)) 2593 if (cap_caching_mode(iommu->cap))
2519 iommu_flush_iotlb_psi(iommu, 0, start_paddr, 2594 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2520 size >> VTD_PAGE_SHIFT);
2521 else 2595 else
2522 iommu_flush_write_buffer(iommu); 2596 iommu_flush_write_buffer(iommu);
2523 2597
2524 return start_paddr + ((u64)paddr & (~PAGE_MASK)); 2598 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2599 start_paddr += paddr & ~PAGE_MASK;
2600 return start_paddr;
2525 2601
2526error: 2602error:
2527 if (iova) 2603 if (iova)
@@ -2614,11 +2690,11 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2614{ 2690{
2615 struct pci_dev *pdev = to_pci_dev(dev); 2691 struct pci_dev *pdev = to_pci_dev(dev);
2616 struct dmar_domain *domain; 2692 struct dmar_domain *domain;
2617 unsigned long start_addr; 2693 unsigned long start_pfn, last_pfn;
2618 struct iova *iova; 2694 struct iova *iova;
2619 struct intel_iommu *iommu; 2695 struct intel_iommu *iommu;
2620 2696
2621 if (iommu_no_mapping(pdev)) 2697 if (iommu_no_mapping(dev))
2622 return; 2698 return;
2623 2699
2624 domain = find_domain(pdev); 2700 domain = find_domain(pdev);
@@ -2627,22 +2703,25 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2627 iommu = domain_get_iommu(domain); 2703 iommu = domain_get_iommu(domain);
2628 2704
2629 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); 2705 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2630 if (!iova) 2706 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2707 (unsigned long long)dev_addr))
2631 return; 2708 return;
2632 2709
2633 start_addr = iova->pfn_lo << PAGE_SHIFT; 2710 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2634 size = aligned_size((u64)dev_addr, size); 2711 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2635 2712
2636 pr_debug("Device %s unmapping: %zx@%llx\n", 2713 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2637 pci_name(pdev), size, (unsigned long long)start_addr); 2714 pci_name(pdev), start_pfn, last_pfn);
2638 2715
2639 /* clear the whole page */ 2716 /* clear the whole page */
2640 dma_pte_clear_range(domain, start_addr, start_addr + size); 2717 dma_pte_clear_range(domain, start_pfn, last_pfn);
2718
2641 /* free page tables */ 2719 /* free page tables */
2642 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2720 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2721
2643 if (intel_iommu_strict) { 2722 if (intel_iommu_strict) {
2644 iommu_flush_iotlb_psi(iommu, domain->id, start_addr, 2723 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2645 size >> VTD_PAGE_SHIFT); 2724 last_pfn - start_pfn + 1);
2646 /* free iova */ 2725 /* free iova */
2647 __free_iova(&domain->iovad, iova); 2726 __free_iova(&domain->iovad, iova);
2648 } else { 2727 } else {
@@ -2700,17 +2779,13 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2700 int nelems, enum dma_data_direction dir, 2779 int nelems, enum dma_data_direction dir,
2701 struct dma_attrs *attrs) 2780 struct dma_attrs *attrs)
2702{ 2781{
2703 int i;
2704 struct pci_dev *pdev = to_pci_dev(hwdev); 2782 struct pci_dev *pdev = to_pci_dev(hwdev);
2705 struct dmar_domain *domain; 2783 struct dmar_domain *domain;
2706 unsigned long start_addr; 2784 unsigned long start_pfn, last_pfn;
2707 struct iova *iova; 2785 struct iova *iova;
2708 size_t size = 0;
2709 phys_addr_t addr;
2710 struct scatterlist *sg;
2711 struct intel_iommu *iommu; 2786 struct intel_iommu *iommu;
2712 2787
2713 if (iommu_no_mapping(pdev)) 2788 if (iommu_no_mapping(hwdev))
2714 return; 2789 return;
2715 2790
2716 domain = find_domain(pdev); 2791 domain = find_domain(pdev);
@@ -2719,22 +2794,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2719 iommu = domain_get_iommu(domain); 2794 iommu = domain_get_iommu(domain);
2720 2795
2721 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); 2796 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2722 if (!iova) 2797 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2798 (unsigned long long)sglist[0].dma_address))
2723 return; 2799 return;
2724 for_each_sg(sglist, sg, nelems, i) {
2725 addr = page_to_phys(sg_page(sg)) + sg->offset;
2726 size += aligned_size((u64)addr, sg->length);
2727 }
2728 2800
2729 start_addr = iova->pfn_lo << PAGE_SHIFT; 2801 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2802 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2730 2803
2731 /* clear the whole page */ 2804 /* clear the whole page */
2732 dma_pte_clear_range(domain, start_addr, start_addr + size); 2805 dma_pte_clear_range(domain, start_pfn, last_pfn);
2806
2733 /* free page tables */ 2807 /* free page tables */
2734 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2808 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2735 2809
2736 iommu_flush_iotlb_psi(iommu, domain->id, start_addr, 2810 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2737 size >> VTD_PAGE_SHIFT); 2811 (last_pfn - start_pfn + 1));
2738 2812
2739 /* free iova */ 2813 /* free iova */
2740 __free_iova(&domain->iovad, iova); 2814 __free_iova(&domain->iovad, iova);
@@ -2757,21 +2831,20 @@ static int intel_nontranslate_map_sg(struct device *hddev,
2757static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, 2831static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2758 enum dma_data_direction dir, struct dma_attrs *attrs) 2832 enum dma_data_direction dir, struct dma_attrs *attrs)
2759{ 2833{
2760 phys_addr_t addr;
2761 int i; 2834 int i;
2762 struct pci_dev *pdev = to_pci_dev(hwdev); 2835 struct pci_dev *pdev = to_pci_dev(hwdev);
2763 struct dmar_domain *domain; 2836 struct dmar_domain *domain;
2764 size_t size = 0; 2837 size_t size = 0;
2765 int prot = 0; 2838 int prot = 0;
2766 size_t offset = 0; 2839 size_t offset_pfn = 0;
2767 struct iova *iova = NULL; 2840 struct iova *iova = NULL;
2768 int ret; 2841 int ret;
2769 struct scatterlist *sg; 2842 struct scatterlist *sg;
2770 unsigned long start_addr; 2843 unsigned long start_vpfn;
2771 struct intel_iommu *iommu; 2844 struct intel_iommu *iommu;
2772 2845
2773 BUG_ON(dir == DMA_NONE); 2846 BUG_ON(dir == DMA_NONE);
2774 if (iommu_no_mapping(pdev)) 2847 if (iommu_no_mapping(hwdev))
2775 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); 2848 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2776 2849
2777 domain = get_valid_domain_for_dev(pdev); 2850 domain = get_valid_domain_for_dev(pdev);
@@ -2780,12 +2853,11 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2780 2853
2781 iommu = domain_get_iommu(domain); 2854 iommu = domain_get_iommu(domain);
2782 2855
2783 for_each_sg(sglist, sg, nelems, i) { 2856 for_each_sg(sglist, sg, nelems, i)
2784 addr = page_to_phys(sg_page(sg)) + sg->offset; 2857 size += aligned_nrpages(sg->offset, sg->length);
2785 size += aligned_size((u64)addr, sg->length);
2786 }
2787 2858
2788 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); 2859 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2860 pdev->dma_mask);
2789 if (!iova) { 2861 if (!iova) {
2790 sglist->dma_length = 0; 2862 sglist->dma_length = 0;
2791 return 0; 2863 return 0;
@@ -2801,35 +2873,24 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2801 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 2873 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2802 prot |= DMA_PTE_WRITE; 2874 prot |= DMA_PTE_WRITE;
2803 2875
2804 start_addr = iova->pfn_lo << PAGE_SHIFT; 2876 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2805 offset = 0; 2877
2806 for_each_sg(sglist, sg, nelems, i) { 2878 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
2807 addr = page_to_phys(sg_page(sg)) + sg->offset; 2879 if (unlikely(ret)) {
2808 size = aligned_size((u64)addr, sg->length); 2880 /* clear the page */
2809 ret = domain_page_mapping(domain, start_addr + offset, 2881 dma_pte_clear_range(domain, start_vpfn,
2810 ((u64)addr) & PHYSICAL_PAGE_MASK, 2882 start_vpfn + size - 1);
2811 size, prot); 2883 /* free page tables */
2812 if (ret) { 2884 dma_pte_free_pagetable(domain, start_vpfn,
2813 /* clear the page */ 2885 start_vpfn + size - 1);
2814 dma_pte_clear_range(domain, start_addr, 2886 /* free iova */
2815 start_addr + offset); 2887 __free_iova(&domain->iovad, iova);
2816 /* free page tables */ 2888 return 0;
2817 dma_pte_free_pagetable(domain, start_addr,
2818 start_addr + offset);
2819 /* free iova */
2820 __free_iova(&domain->iovad, iova);
2821 return 0;
2822 }
2823 sg->dma_address = start_addr + offset +
2824 ((u64)addr & (~PAGE_MASK));
2825 sg->dma_length = sg->length;
2826 offset += size;
2827 } 2889 }
2828 2890
2829 /* it's a non-present to present mapping. Only flush if caching mode */ 2891 /* it's a non-present to present mapping. Only flush if caching mode */
2830 if (cap_caching_mode(iommu->cap)) 2892 if (cap_caching_mode(iommu->cap))
2831 iommu_flush_iotlb_psi(iommu, 0, start_addr, 2893 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
2832 offset >> VTD_PAGE_SHIFT);
2833 else 2894 else
2834 iommu_flush_write_buffer(iommu); 2895 iommu_flush_write_buffer(iommu);
2835 2896
@@ -3334,7 +3395,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
3334 int adjust_width; 3395 int adjust_width;
3335 3396
3336 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 3397 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3337 spin_lock_init(&domain->mapping_lock);
3338 spin_lock_init(&domain->iommu_lock); 3398 spin_lock_init(&domain->iommu_lock);
3339 3399
3340 domain_reserve_special_ranges(domain); 3400 domain_reserve_special_ranges(domain);
@@ -3348,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
3348 3408
3349 domain->iommu_count = 0; 3409 domain->iommu_count = 0;
3350 domain->iommu_coherency = 0; 3410 domain->iommu_coherency = 0;
3411 domain->iommu_snooping = 0;
3351 domain->max_addr = 0; 3412 domain->max_addr = 0;
3352 3413
3353 /* always allocate the top pgd */ 3414 /* always allocate the top pgd */
@@ -3388,8 +3449,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
3388 3449
3389static void vm_domain_exit(struct dmar_domain *domain) 3450static void vm_domain_exit(struct dmar_domain *domain)
3390{ 3451{
3391 u64 end;
3392
3393 /* Domain 0 is reserved, so don't process it */ 3452 /* Domain 0 is reserved, so don't process it */
3394 if (!domain) 3453 if (!domain)
3395 return; 3454 return;
@@ -3397,14 +3456,12 @@ static void vm_domain_exit(struct dmar_domain *domain)
3397 vm_domain_remove_all_dev_info(domain); 3456 vm_domain_remove_all_dev_info(domain);
3398 /* destroy iovas */ 3457 /* destroy iovas */
3399 put_iova_domain(&domain->iovad); 3458 put_iova_domain(&domain->iovad);
3400 end = DOMAIN_MAX_ADDR(domain->gaw);
3401 end = end & (~VTD_PAGE_MASK);
3402 3459
3403 /* clear ptes */ 3460 /* clear ptes */
3404 dma_pte_clear_range(domain, 0, end); 3461 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3405 3462
3406 /* free page tables */ 3463 /* free page tables */
3407 dma_pte_free_pagetable(domain, 0, end); 3464 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3408 3465
3409 iommu_free_vm_domain(domain); 3466 iommu_free_vm_domain(domain);
3410 free_domain_mem(domain); 3467 free_domain_mem(domain);
@@ -3513,7 +3570,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3513 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) 3570 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3514 prot |= DMA_PTE_SNP; 3571 prot |= DMA_PTE_SNP;
3515 3572
3516 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); 3573 max_addr = iova + size;
3517 if (dmar_domain->max_addr < max_addr) { 3574 if (dmar_domain->max_addr < max_addr) {
3518 int min_agaw; 3575 int min_agaw;
3519 u64 end; 3576 u64 end;
@@ -3531,8 +3588,11 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3531 } 3588 }
3532 dmar_domain->max_addr = max_addr; 3589 dmar_domain->max_addr = max_addr;
3533 } 3590 }
3534 3591 /* Round up size to next multiple of PAGE_SIZE, if it and
3535 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); 3592 the low bits of hpa would take us onto the next page */
3593 size = aligned_nrpages(hpa, size);
3594 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3595 hpa >> VTD_PAGE_SHIFT, size, prot);
3536 return ret; 3596 return ret;
3537} 3597}
3538 3598
@@ -3540,15 +3600,15 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
3540 unsigned long iova, size_t size) 3600 unsigned long iova, size_t size)
3541{ 3601{
3542 struct dmar_domain *dmar_domain = domain->priv; 3602 struct dmar_domain *dmar_domain = domain->priv;
3543 dma_addr_t base;
3544 3603
3545 /* The address might not be aligned */ 3604 if (!size)
3546 base = iova & VTD_PAGE_MASK; 3605 return;
3547 size = VTD_PAGE_ALIGN(size); 3606
3548 dma_pte_clear_range(dmar_domain, base, base + size); 3607 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3608 (iova + size - 1) >> VTD_PAGE_SHIFT);
3549 3609
3550 if (dmar_domain->max_addr == base + size) 3610 if (dmar_domain->max_addr == iova + size)
3551 dmar_domain->max_addr = base; 3611 dmar_domain->max_addr = iova;
3552} 3612}
3553 3613
3554static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, 3614static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3558,7 +3618,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3558 struct dma_pte *pte; 3618 struct dma_pte *pte;
3559 u64 phys = 0; 3619 u64 phys = 0;
3560 3620
3561 pte = addr_to_dma_pte(dmar_domain, iova); 3621 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3562 if (pte) 3622 if (pte)
3563 phys = dma_pte_addr(pte); 3623 phys = dma_pte_addr(pte);
3564 3624
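The hunks above move the intel-iommu helpers from byte addresses to page frame numbers, and intel_iommu_map_range() now derives the page count with aligned_nrpages() so a mapping whose host address starts partway into a page still covers its last page. A minimal stand-alone sketch of that rounding, assuming 4KiB VT-d pages; the function body here is an illustration, not the in-tree definition:

#include <stdio.h>

#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_SIZE  (1UL << VTD_PAGE_SHIFT)

/* Pages needed to map 'size' bytes starting at host address 'hpa':
 * the offset of hpa within its first page has to be counted as well. */
static unsigned long aligned_nrpages(unsigned long long hpa, unsigned long size)
{
	unsigned long offset = hpa & (VTD_PAGE_SIZE - 1);

	return (offset + size + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* 16 bytes straddling a page boundary still need two pages. */
	printf("%lu\n", aligned_nrpages(0x1ff8, 16));    /* 2 */
	/* A page-aligned 4KiB mapping needs exactly one page. */
	printf("%lu\n", aligned_nrpages(0x2000, 4096));  /* 1 */
	return 0;
}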
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 2287116e9822..46dd440e2315 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -1,9 +1,19 @@
1/* 1/*
2 * Copyright (c) 2006, Intel Corporation. 2 * Copyright © 2006-2009, Intel Corporation.
3 * 3 *
4 * This file is released under the GPLv2. 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
5 * 16 *
6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 17 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 */ 18 */
9 19
@@ -123,7 +133,15 @@ move_left:
123 /* Insert the new_iova into domain rbtree by holding writer lock */ 133 /* Insert the new_iova into domain rbtree by holding writer lock */
124 /* Add new node and rebalance tree. */ 134 /* Add new node and rebalance tree. */
125 { 135 {
126 struct rb_node **entry = &((prev)), *parent = NULL; 136 struct rb_node **entry, *parent = NULL;
137
138 /* If we have 'prev', it's a valid place to start the
139 insertion. Otherwise, start from the root. */
140 if (prev)
141 entry = &prev;
142 else
143 entry = &iovad->rbroot.rb_node;
144
127 /* Figure out where to put new node */ 145 /* Figure out where to put new node */
128 while (*entry) { 146 while (*entry) {
129 struct iova *this = container_of(*entry, 147 struct iova *this = container_of(*entry,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d9f06fbfa0bf..d986afb7032b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
127 * reliably as devices without an INTx disable bit will then generate a 127 * reliably as devices without an INTx disable bit will then generate a
128 * level IRQ which will never be cleared. 128 * level IRQ which will never be cleared.
129 */ 129 */
130static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 130static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
131{ 131{
132 u32 mask_bits = desc->masked; 132 u32 mask_bits = desc->masked;
133 133
134 if (!desc->msi_attrib.maskbit) 134 if (!desc->msi_attrib.maskbit)
135 return; 135 return 0;
136 136
137 mask_bits &= ~mask; 137 mask_bits &= ~mask;
138 mask_bits |= flag; 138 mask_bits |= flag;
139 pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); 139 pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
140 desc->masked = mask_bits; 140
141 return mask_bits;
142}
143
144static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
145{
146 desc->masked = __msi_mask_irq(desc, mask, flag);
141} 147}
142 148
143/* 149/*
@@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
147 * file. This saves a few milliseconds when initialising devices with lots 153 * file. This saves a few milliseconds when initialising devices with lots
148 * of MSI-X interrupts. 154 * of MSI-X interrupts.
149 */ 155 */
150static void msix_mask_irq(struct msi_desc *desc, u32 flag) 156static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
151{ 157{
152 u32 mask_bits = desc->masked; 158 u32 mask_bits = desc->masked;
153 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 159 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
154 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 160 PCI_MSIX_ENTRY_VECTOR_CTRL;
155 mask_bits &= ~1; 161 mask_bits &= ~1;
156 mask_bits |= flag; 162 mask_bits |= flag;
157 writel(mask_bits, desc->mask_base + offset); 163 writel(mask_bits, desc->mask_base + offset);
158 desc->masked = mask_bits; 164
165 return mask_bits;
166}
167
168static void msix_mask_irq(struct msi_desc *desc, u32 flag)
169{
170 desc->masked = __msix_mask_irq(desc, flag);
159} 171}
160 172
161static void msi_set_mask_bit(unsigned irq, u32 flag) 173static void msi_set_mask_bit(unsigned irq, u32 flag)
@@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
188 void __iomem *base = entry->mask_base + 200 void __iomem *base = entry->mask_base +
189 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; 201 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
190 202
191 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 203 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
192 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 204 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
193 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET); 205 msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
194 } else { 206 } else {
195 struct pci_dev *dev = entry->dev; 207 struct pci_dev *dev = entry->dev;
196 int pos = entry->msi_attrib.pos; 208 int pos = entry->msi_attrib.pos;
@@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
225 base = entry->mask_base + 237 base = entry->mask_base +
226 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; 238 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
227 239
228 writel(msg->address_lo, 240 writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
229 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 241 writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
230 writel(msg->address_hi, 242 writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
231 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
232 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
233 } else { 243 } else {
234 struct pci_dev *dev = entry->dev; 244 struct pci_dev *dev = entry->dev;
235 int pos = entry->msi_attrib.pos; 245 int pos = entry->msi_attrib.pos;
@@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
385 /* Configure MSI capability structure */ 395 /* Configure MSI capability structure */
386 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); 396 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
387 if (ret) { 397 if (ret) {
398 msi_mask_irq(entry, mask, ~mask);
388 msi_free_irqs(dev); 399 msi_free_irqs(dev);
389 return ret; 400 return ret;
390 } 401 }
@@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev,
439 450
440 for (i = 0; i < nvec; i++) { 451 for (i = 0; i < nvec; i++) {
441 entry = alloc_msi_entry(dev); 452 entry = alloc_msi_entry(dev);
442 if (!entry) 453 if (!entry) {
443 break; 454 if (!i)
455 iounmap(base);
456 else
457 msi_free_irqs(dev);
 458 /* Not enough memory. Don't try again */
459 return -ENOMEM;
460 }
444 461
445 j = entries[i].entry; 462 j = entries[i].entry;
446 entry->msi_attrib.is_msix = 1; 463 entry->msi_attrib.is_msix = 1;
@@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev,
487 set_irq_msi(entry->irq, entry); 504 set_irq_msi(entry->irq, entry);
488 j = entries[i].entry; 505 j = entries[i].entry;
489 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE + 506 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
490 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 507 PCI_MSIX_ENTRY_VECTOR_CTRL);
491 msix_mask_irq(entry, 1); 508 msix_mask_irq(entry, 1);
492 i++; 509 i++;
493 } 510 }
@@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev)
611 pci_intx_for_msi(dev, 1); 628 pci_intx_for_msi(dev, 1);
612 dev->msi_enabled = 0; 629 dev->msi_enabled = 0;
613 630
631 /* Return the device with MSI unmasked as initial states */
614 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl); 632 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
615 mask = msi_capable_mask(ctrl); 633 mask = msi_capable_mask(ctrl);
616 msi_mask_irq(desc, mask, ~mask); 634 /* Keep cached state to be restored */
635 __msi_mask_irq(desc, mask, ~mask);
617 636
618 /* Restore dev->irq to its default pin-assertion irq */ 637 /* Restore dev->irq to its default pin-assertion irq */
619 dev->irq = desc->msi_attrib.default_irq; 638 dev->irq = desc->msi_attrib.default_irq;
@@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
653 672
654 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 673 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
655 if (entry->msi_attrib.is_msix) { 674 if (entry->msi_attrib.is_msix) {
656 msix_mask_irq(entry, 1);
657 if (list_is_last(&entry->list, &dev->msi_list)) 675 if (list_is_last(&entry->list, &dev->msi_list))
658 iounmap(entry->mask_base); 676 iounmap(entry->mask_base);
659 } 677 }
@@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
741 759
742void pci_msix_shutdown(struct pci_dev* dev) 760void pci_msix_shutdown(struct pci_dev* dev)
743{ 761{
762 struct msi_desc *entry;
763
744 if (!pci_msi_enable || !dev || !dev->msix_enabled) 764 if (!pci_msi_enable || !dev || !dev->msix_enabled)
745 return; 765 return;
746 766
767 /* Return the device with MSI-X masked as initial states */
768 list_for_each_entry(entry, &dev->msi_list, list) {
769 /* Keep cached states to be restored */
770 __msix_mask_irq(entry, 1);
771 }
772
747 msix_set_enable(dev, 0); 773 msix_set_enable(dev, 0);
748 pci_intx_for_msi(dev, 1); 774 pci_intx_for_msi(dev, 1);
749 dev->msix_enabled = 0; 775 dev->msix_enabled = 0;
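The msi.c changes above split the mask helpers in two: __msi_mask_irq()/__msix_mask_irq() write the hardware mask and return the value written, while the plain wrappers also refresh desc->masked. That lets pci_msi_shutdown() and pci_msix_shutdown() hand the device back unmasked without losing the cached state that is restored the next time MSI is enabled. A toy sketch of the pattern, with invented names and no PCI accessors, purely as an illustration:

/* Stand-in for struct msi_desc: one cached copy, one "hardware" register. */
struct toy_desc {
	unsigned int masked;   /* cached mask bits, preserved across shutdown */
	unsigned int hw_reg;   /* stands in for the PCI config/MMIO register */
};

/* Write the register and report what was written; leave the cache alone. */
static unsigned int __toy_mask_irq(struct toy_desc *d, unsigned int mask,
				   unsigned int flag)
{
	unsigned int bits = (d->masked & ~mask) | flag;

	d->hw_reg = bits;
	return bits;
}

/* Normal path: write the register and refresh the cached copy too. */
static void toy_mask_irq(struct toy_desc *d, unsigned int mask, unsigned int flag)
{
	d->masked = __toy_mask_irq(d, mask, flag);
}

Shutdown would call the double-underscore form directly, which is exactly what the hunks above do with the real helpers.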
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index a0662842550b..de27c1cb5a2b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,11 +6,11 @@
6#ifndef MSI_H 6#ifndef MSI_H
7#define MSI_H 7#define MSI_H
8 8
9#define PCI_MSIX_ENTRY_SIZE 16 9#define PCI_MSIX_ENTRY_SIZE 16
10#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0 10#define PCI_MSIX_ENTRY_LOWER_ADDR 0
11#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4 11#define PCI_MSIX_ENTRY_UPPER_ADDR 4
12#define PCI_MSIX_ENTRY_DATA_OFFSET 8 12#define PCI_MSIX_ENTRY_DATA 8
13#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET 12 13#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
14 14
15#define msi_control_reg(base) (base + PCI_MSI_FLAGS) 15#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) 16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6c93af5ced18..dbd0f947f497 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1517,11 +1517,20 @@ void pci_enable_ari(struct pci_dev *dev)
1517 * 1517 *
1518 * Perform INTx swizzling for a device behind one level of bridge. This is 1518 * Perform INTx swizzling for a device behind one level of bridge. This is
1519 * required by section 9.1 of the PCI-to-PCI bridge specification for devices 1519 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
1520 * behind bridges on add-in cards. 1520 * behind bridges on add-in cards. For devices with ARI enabled, the slot
1521 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
1522 * the PCI Express Base Specification, Revision 2.1)
1521 */ 1523 */
1522u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin) 1524u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
1523{ 1525{
1524 return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; 1526 int slot;
1527
1528 if (pci_ari_enabled(dev->bus))
1529 slot = 0;
1530 else
1531 slot = PCI_SLOT(dev->devfn);
1532
1533 return (((pin - 1) + slot) % 4) + 1;
1525} 1534}
1526 1535
1527int 1536int
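The pci_swizzle_interrupt_pin() hunk above treats ARI devices as slot 0 when swizzling. A stand-alone worked example of the formula; swizzle_pin() and the ari flag are local stand-ins for the kernel function and pci_ari_enabled():

#include <stdio.h>

/* One level of INTx swizzling: pins are numbered 1..4 for INTA..INTD. */
static unsigned char swizzle_pin(unsigned char pin, unsigned char slot, int ari)
{
	if (ari)
		slot = 0;   /* ARI devices always present as slot 0 */

	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* A device in slot 3 driving INTB (pin 2) appears upstream as INTA (1). */
	printf("%u\n", swizzle_pin(2, 3, 0));
	/* With ARI the slot is taken as 0, so INTB passes through unchanged (2). */
	printf("%u\n", swizzle_pin(2, 3, 1));
	return 0;
}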
@@ -2171,7 +2180,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2171 u16 ctrl; 2180 u16 ctrl;
2172 struct pci_dev *pdev; 2181 struct pci_dev *pdev;
2173 2182
2174 if (dev->subordinate) 2183 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2175 return -ENOTTY; 2184 return -ENOTTY;
2176 2185
2177 list_for_each_entry(pdev, &dev->bus->devices, bus_list) 2186 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index ece97df4df6d..a928d8ab6bda 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -106,7 +106,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
106 disable_ecrc_checking(dev); 106 disable_ecrc_checking(dev);
107 break; 107 break;
108 case ECRC_POLICY_ON: 108 case ECRC_POLICY_ON:
109 enable_ecrc_checking(dev);; 109 enable_ecrc_checking(dev);
110 break; 110 break;
111 default: 111 default:
112 return; 112 return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 56552d74abea..06b965623962 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
1058} 1058}
1059DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3); 1059DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
1060DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3); 1060DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
1061/* ALi loses some register settings that we cannot then restore */
1062DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
1063/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
 1064 occur during mode detection */
1065DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
1061 1066
1062/* This was originally an Alpha specific thing, but it really fits here. 1067/* This was originally an Alpha specific thing, but it really fits here.
1063 * The i82375 PCI/EISA bridge appears as non-classified. Fix that. 1068 * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index b711fb7181e2..1898c7b47907 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -100,16 +100,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
100{ 100{
101 struct resource *res = &dev->resource[resource]; 101 struct resource *res = &dev->resource[resource];
102 struct resource *root; 102 struct resource *root;
103 char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
104 int err; 103 int err;
105 104
106 root = pci_find_parent_resource(dev, res); 105 root = pci_find_parent_resource(dev, res);
107 106
108 err = -EINVAL; 107 err = -EINVAL;
109 if (root != NULL) 108 if (root != NULL)
110 err = insert_resource(root, res); 109 err = request_resource(root, res);
111 110
112 if (err) { 111 if (err) {
112 const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
113 dev_err(&dev->dev, "BAR %d: %s of %s %pR\n", 113 dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
114 resource, 114 resource,
115 root ? "address space collision on" : 115 root ? "address space collision on" :
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index eddb0748b0ea..8c02b6c53bdb 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot);
311#include <linux/pci_hotplug.h> 311#include <linux/pci_hotplug.h>
312/** 312/**
313 * pci_hp_create_link - create symbolic link to the hotplug driver module. 313 * pci_hp_create_link - create symbolic link to the hotplug driver module.
314 * @slot: struct pci_slot 314 * @pci_slot: struct pci_slot
315 * 315 *
316 * Helper function for pci_hotplug_core.c to create symbolic link to 316 * Helper function for pci_hotplug_core.c to create symbolic link to
317 * the hotplug driver module. 317 * the hotplug driver module.
@@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
334 334
335/** 335/**
336 * pci_hp_remove_link - remove symbolic link to the hotplug driver module. 336 * pci_hp_remove_link - remove symbolic link to the hotplug driver module.
337 * @slot: struct pci_slot 337 * @pci_slot: struct pci_slot
338 * 338 *
339 * Helper function for pci_hotplug_core.c to remove symbolic link to 339 * Helper function for pci_hotplug_core.c to remove symbolic link to
340 * the hotplug driver module. 340 * the hotplug driver module.
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index ec22284eed30..e1c1ec540893 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/smp_lock.h>
13#include <linux/syscalls.h> 12#include <linux/syscalls.h>
14#include <asm/uaccess.h> 13#include <asm/uaccess.h>
15#include "pci.h" 14#include "pci.h"
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 9ad97ea836e8..8eb04230fec7 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -472,7 +472,8 @@ static int __init init_tcic(void)
472 init_timer(&poll_timer); 472 init_timer(&poll_timer);
473 473
474 /* Build interrupt mask */ 474 /* Build interrupt mask */
475 printk(", %d sockets\n" KERN_INFO " irq list (", sockets); 475 printk(KERN_CONT ", %d sockets\n", sockets);
476 printk(KERN_INFO " irq list (");
476 if (irq_list_count == 0) 477 if (irq_list_count == 0)
477 mask = irq_mask; 478 mask = irq_mask;
478 else 479 else
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 659421d0ca46..d4ad50d737b0 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services. 2 * vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services.
3 * 3 *
4 * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -32,7 +32,7 @@
32#include "i82365.h" 32#include "i82365.h"
33 33
34MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services"); 34MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services");
35MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 35MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
36MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
37 37
38#define CARD_MAX_SLOTS 2 38#define CARD_MAX_SLOTS 2
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index 812f038e9bda..9b3c15827e5c 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -6,7 +6,7 @@
6 * NEC VRC4173 CARDU driver for Socket Services 6 * NEC VRC4173 CARDU driver for Socket Services
7 * (This device doesn't support CardBus. It supports only 16-bit PC Cards.) 7 * (This device doesn't support CardBus. It supports only 16-bit PC Cards.)
8 * 8 *
9 * Copyright 2002,2003 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 9 * Copyright 2002,2003 Yoichi Yuasa <yuasa@linux-mips.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
@@ -41,7 +41,7 @@
41#include "vrc4173_cardu.h" 41#include "vrc4173_cardu.h"
42 42
43MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services"); 43MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services");
44MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 44MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46 46
47static int vrc4173_cardu_slots; 47static int vrc4173_cardu_slots;
diff --git a/drivers/pcmcia/vrc4173_cardu.h b/drivers/pcmcia/vrc4173_cardu.h
index 7d77c74120c1..a7d96018ed8d 100644
--- a/drivers/pcmcia/vrc4173_cardu.h
+++ b/drivers/pcmcia/vrc4173_cardu.h
@@ -5,7 +5,7 @@
5 * BRIEF MODULE DESCRIPTION 5 * BRIEF MODULE DESCRIPTION
6 * Include file for NEC VRC4173 CARDU. 6 * Include file for NEC VRC4173 CARDU.
7 * 7 *
8 * Copyright 2002 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 8 * Copyright 2002 Yoichi Yuasa <yuasa@linux-mips.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 11 * under the terms of the GNU General Public License as published by the
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7232fe7104aa..77c6097ced80 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -277,31 +277,6 @@ config THINKPAD_ACPI_UNSAFE_LEDS
277 Say N here, unless you are building a kernel for your own 277 Say N here, unless you are building a kernel for your own
278 use, and need to control the important firmware LEDs. 278 use, and need to control the important firmware LEDs.
279 279
280config THINKPAD_ACPI_DOCK
281 bool "Legacy Docking Station Support"
282 depends on THINKPAD_ACPI
283 depends on ACPI_DOCK=n
284 default n
285 ---help---
286 Allows the thinkpad_acpi driver to handle docking station events.
287 This support was made obsolete by the generic ACPI docking station
288 support (CONFIG_ACPI_DOCK). It will allow locking and removing the
289 laptop from the docking station, but will not properly connect PCI
290 devices.
291
292 If you are not sure, say N here.
293
294config THINKPAD_ACPI_BAY
295 bool "Legacy Removable Bay Support"
296 depends on THINKPAD_ACPI
297 default y
298 ---help---
299 Allows the thinkpad_acpi driver to handle removable bays. It will
300 electrically disable the device in the bay, and also generate
301 notifications when the bay lever is ejected or inserted.
302
303 If you are not sure, say Y here.
304
305config THINKPAD_ACPI_VIDEO 280config THINKPAD_ACPI_VIDEO
306 bool "Video output control support" 281 bool "Video output control support"
307 depends on THINKPAD_ACPI 282 depends on THINKPAD_ACPI
@@ -355,6 +330,7 @@ config EEEPC_LAPTOP
355 depends on INPUT 330 depends on INPUT
356 depends on EXPERIMENTAL 331 depends on EXPERIMENTAL
357 depends on RFKILL || RFKILL = n 332 depends on RFKILL || RFKILL = n
333 depends on HOTPLUG_PCI
358 select BACKLIGHT_CLASS_DEVICE 334 select BACKLIGHT_CLASS_DEVICE
359 select HWMON 335 select HWMON
360 ---help--- 336 ---help---
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index be2fd6f91639..fb45f5ee8df1 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -973,7 +973,7 @@ static int acer_rfkill_set(void *data, bool blocked)
973{ 973{
974 acpi_status status; 974 acpi_status status;
975 u32 cap = (unsigned long)data; 975 u32 cap = (unsigned long)data;
976 status = set_u32(!!blocked, cap); 976 status = set_u32(!blocked, cap);
977 if (ACPI_FAILURE(status)) 977 if (ACPI_FAILURE(status))
978 return -ENODEV; 978 return -ENODEV;
979 return 0; 979 return 0;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 4207b26ff990..222ffb892f22 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -16,6 +16,8 @@
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include <linux/kernel.h> 21#include <linux/kernel.h>
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/init.h> 23#include <linux/init.h>
@@ -31,6 +33,7 @@
31#include <linux/input.h> 33#include <linux/input.h>
32#include <linux/rfkill.h> 34#include <linux/rfkill.h>
33#include <linux/pci.h> 35#include <linux/pci.h>
36#include <linux/pci_hotplug.h>
34 37
35#define EEEPC_LAPTOP_VERSION "0.1" 38#define EEEPC_LAPTOP_VERSION "0.1"
36 39
@@ -40,11 +43,6 @@
40#define EEEPC_HOTK_DEVICE_NAME "Hotkey" 43#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
41#define EEEPC_HOTK_HID "ASUS010" 44#define EEEPC_HOTK_HID "ASUS010"
42 45
43#define EEEPC_LOG EEEPC_HOTK_FILE ": "
44#define EEEPC_ERR KERN_ERR EEEPC_LOG
45#define EEEPC_WARNING KERN_WARNING EEEPC_LOG
46#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG
47#define EEEPC_INFO KERN_INFO EEEPC_LOG
48 46
49/* 47/*
50 * Definitions for Asus EeePC 48 * Definitions for Asus EeePC
@@ -141,8 +139,11 @@ struct eeepc_hotk {
141 u16 event_count[128]; /* count for each event */ 139 u16 event_count[128]; /* count for each event */
142 struct input_dev *inputdev; 140 struct input_dev *inputdev;
143 u16 *keycode_map; 141 u16 *keycode_map;
144 struct rfkill *eeepc_wlan_rfkill; 142 struct rfkill *wlan_rfkill;
145 struct rfkill *eeepc_bluetooth_rfkill; 143 struct rfkill *bluetooth_rfkill;
144 struct rfkill *wwan3g_rfkill;
145 struct hotplug_slot *hotplug_slot;
146 struct work_struct hotplug_work;
146}; 147};
147 148
148/* The actual device the driver binds to */ 149/* The actual device the driver binds to */
@@ -213,6 +214,15 @@ static struct acpi_driver eeepc_hotk_driver = {
213 }, 214 },
214}; 215};
215 216
217/* PCI hotplug ops */
218static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value);
219
220static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
221 .owner = THIS_MODULE,
222 .get_adapter_status = eeepc_get_adapter_status,
223 .get_power_status = eeepc_get_adapter_status,
224};
225
216/* The backlight device /sys/class/backlight */ 226/* The backlight device /sys/class/backlight */
217static struct backlight_device *eeepc_backlight_device; 227static struct backlight_device *eeepc_backlight_device;
218 228
@@ -274,20 +284,20 @@ static int set_acpi(int cm, int value)
274 if (method == NULL) 284 if (method == NULL)
275 return -ENODEV; 285 return -ENODEV;
276 if (write_acpi_int(ehotk->handle, method, value, NULL)) 286 if (write_acpi_int(ehotk->handle, method, value, NULL))
277 printk(EEEPC_WARNING "Error writing %s\n", method); 287 pr_warning("Error writing %s\n", method);
278 } 288 }
279 return 0; 289 return 0;
280} 290}
281 291
282static int get_acpi(int cm) 292static int get_acpi(int cm)
283{ 293{
284 int value = -1; 294 int value = -ENODEV;
285 if ((ehotk->cm_supported & (0x1 << cm))) { 295 if ((ehotk->cm_supported & (0x1 << cm))) {
286 const char *method = cm_getv[cm]; 296 const char *method = cm_getv[cm];
287 if (method == NULL) 297 if (method == NULL)
288 return -ENODEV; 298 return -ENODEV;
289 if (read_acpi_int(ehotk->handle, method, &value)) 299 if (read_acpi_int(ehotk->handle, method, &value))
290 printk(EEEPC_WARNING "Error reading %s\n", method); 300 pr_warning("Error reading %s\n", method);
291 } 301 }
292 return value; 302 return value;
293} 303}
@@ -359,13 +369,19 @@ static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
359 369
360 rv = parse_arg(buf, count, &value); 370 rv = parse_arg(buf, count, &value);
361 if (rv > 0) 371 if (rv > 0)
362 set_acpi(cm, value); 372 value = set_acpi(cm, value);
373 if (value < 0)
374 return value;
363 return rv; 375 return rv;
364} 376}
365 377
366static ssize_t show_sys_acpi(int cm, char *buf) 378static ssize_t show_sys_acpi(int cm, char *buf)
367{ 379{
368 return sprintf(buf, "%d\n", get_acpi(cm)); 380 int value = get_acpi(cm);
381
382 if (value < 0)
383 return value;
384 return sprintf(buf, "%d\n", value);
369} 385}
370 386
371#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \ 387#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
@@ -539,6 +555,28 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
539 return -EINVAL; 555 return -EINVAL;
540} 556}
541 557
558static void cmsg_quirk(int cm, const char *name)
559{
560 int dummy;
561
 562 /* Some BIOSes do not report cm although it is available.
563 Check if cm_getv[cm] works and, if yes, assume cm should be set. */
564 if (!(ehotk->cm_supported & (1 << cm))
565 && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) {
566 pr_info("%s (%x) not reported by BIOS,"
567 " enabling anyway\n", name, 1 << cm);
568 ehotk->cm_supported |= 1 << cm;
569 }
570}
571
572static void cmsg_quirks(void)
573{
574 cmsg_quirk(CM_ASL_LID, "LID");
575 cmsg_quirk(CM_ASL_TYPE, "TYPE");
576 cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER");
577 cmsg_quirk(CM_ASL_TPD, "TPD");
578}
579
542static int eeepc_hotk_check(void) 580static int eeepc_hotk_check(void)
543{ 581{
544 const struct key_entry *key; 582 const struct key_entry *key;
@@ -551,26 +589,24 @@ static int eeepc_hotk_check(void)
551 if (ehotk->device->status.present) { 589 if (ehotk->device->status.present) {
552 if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag, 590 if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
553 &buffer)) { 591 &buffer)) {
554 printk(EEEPC_ERR "Hotkey initialization failed\n"); 592 pr_err("Hotkey initialization failed\n");
555 return -ENODEV; 593 return -ENODEV;
556 } else { 594 } else {
557 printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n", 595 pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag);
558 ehotk->init_flag);
559 } 596 }
560 /* get control methods supported */ 597 /* get control methods supported */
561 if (read_acpi_int(ehotk->handle, "CMSG" 598 if (read_acpi_int(ehotk->handle, "CMSG"
562 , &ehotk->cm_supported)) { 599 , &ehotk->cm_supported)) {
563 printk(EEEPC_ERR 600 pr_err("Get control methods supported failed\n");
564 "Get control methods supported failed\n");
565 return -ENODEV; 601 return -ENODEV;
566 } else { 602 } else {
567 printk(EEEPC_INFO 603 cmsg_quirks();
568 "Get control methods supported: 0x%x\n", 604 pr_info("Get control methods supported: 0x%x\n",
569 ehotk->cm_supported); 605 ehotk->cm_supported);
570 } 606 }
571 ehotk->inputdev = input_allocate_device(); 607 ehotk->inputdev = input_allocate_device();
572 if (!ehotk->inputdev) { 608 if (!ehotk->inputdev) {
573 printk(EEEPC_INFO "Unable to allocate input device\n"); 609 pr_info("Unable to allocate input device\n");
574 return 0; 610 return 0;
575 } 611 }
576 ehotk->inputdev->name = "Asus EeePC extra buttons"; 612 ehotk->inputdev->name = "Asus EeePC extra buttons";
@@ -589,12 +625,12 @@ static int eeepc_hotk_check(void)
589 } 625 }
590 result = input_register_device(ehotk->inputdev); 626 result = input_register_device(ehotk->inputdev);
591 if (result) { 627 if (result) {
592 printk(EEEPC_INFO "Unable to register input device\n"); 628 pr_info("Unable to register input device\n");
593 input_free_device(ehotk->inputdev); 629 input_free_device(ehotk->inputdev);
594 return 0; 630 return 0;
595 } 631 }
596 } else { 632 } else {
597 printk(EEEPC_ERR "Hotkey device not present, aborting\n"); 633 pr_err("Hotkey device not present, aborting\n");
598 return -EINVAL; 634 return -EINVAL;
599 } 635 }
600 return 0; 636 return 0;
@@ -612,14 +648,27 @@ static int notify_brn(void)
612 return -1; 648 return -1;
613} 649}
614 650
615static void eeepc_rfkill_hotplug(void) 651static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
652 u8 *value)
653{
654 int val = get_acpi(CM_ASL_WLAN);
655
656 if (val == 1 || val == 0)
657 *value = val;
658 else
659 return -EINVAL;
660
661 return 0;
662}
663
664static void eeepc_hotplug_work(struct work_struct *work)
616{ 665{
617 struct pci_dev *dev; 666 struct pci_dev *dev;
618 struct pci_bus *bus = pci_find_bus(0, 1); 667 struct pci_bus *bus = pci_find_bus(0, 1);
619 bool blocked; 668 bool blocked;
620 669
621 if (!bus) { 670 if (!bus) {
622 printk(EEEPC_WARNING "Unable to find PCI bus 1?\n"); 671 pr_warning("Unable to find PCI bus 1?\n");
623 return; 672 return;
624 } 673 }
625 674
@@ -635,7 +684,7 @@ static void eeepc_rfkill_hotplug(void)
635 if (dev) { 684 if (dev) {
636 pci_bus_assign_resources(bus); 685 pci_bus_assign_resources(bus);
637 if (pci_bus_add_device(dev)) 686 if (pci_bus_add_device(dev))
638 printk(EEEPC_ERR "Unable to hotplug wifi\n"); 687 pr_err("Unable to hotplug wifi\n");
639 } 688 }
640 } else { 689 } else {
641 dev = pci_get_slot(bus, 0); 690 dev = pci_get_slot(bus, 0);
@@ -645,7 +694,7 @@ static void eeepc_rfkill_hotplug(void)
645 } 694 }
646 } 695 }
647 696
648 rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked); 697 rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
649} 698}
650 699
651static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) 700static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
@@ -653,7 +702,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
653 if (event != ACPI_NOTIFY_BUS_CHECK) 702 if (event != ACPI_NOTIFY_BUS_CHECK)
654 return; 703 return;
655 704
656 eeepc_rfkill_hotplug(); 705 schedule_work(&ehotk->hotplug_work);
657} 706}
658 707
659static void eeepc_hotk_notify(struct acpi_device *device, u32 event) 708static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
@@ -718,8 +767,7 @@ static int eeepc_register_rfkill_notifier(char *node)
718 eeepc_rfkill_notify, 767 eeepc_rfkill_notify,
719 NULL); 768 NULL);
720 if (ACPI_FAILURE(status)) 769 if (ACPI_FAILURE(status))
721 printk(EEEPC_WARNING 770 pr_warning("Failed to register notify on %s\n", node);
722 "Failed to register notify on %s\n", node);
723 } else 771 } else
724 return -ENODEV; 772 return -ENODEV;
725 773
@@ -738,19 +786,66 @@ static void eeepc_unregister_rfkill_notifier(char *node)
738 ACPI_SYSTEM_NOTIFY, 786 ACPI_SYSTEM_NOTIFY,
739 eeepc_rfkill_notify); 787 eeepc_rfkill_notify);
740 if (ACPI_FAILURE(status)) 788 if (ACPI_FAILURE(status))
741 printk(EEEPC_ERR 789 pr_err("Error removing rfkill notify handler %s\n",
742 "Error removing rfkill notify handler %s\n",
743 node); 790 node);
744 } 791 }
745} 792}
746 793
794static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
795{
796 kfree(hotplug_slot->info);
797 kfree(hotplug_slot);
798}
799
800static int eeepc_setup_pci_hotplug(void)
801{
802 int ret = -ENOMEM;
803 struct pci_bus *bus = pci_find_bus(0, 1);
804
805 if (!bus) {
806 pr_err("Unable to find wifi PCI bus\n");
807 return -ENODEV;
808 }
809
810 ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
811 if (!ehotk->hotplug_slot)
812 goto error_slot;
813
814 ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
815 GFP_KERNEL);
816 if (!ehotk->hotplug_slot->info)
817 goto error_info;
818
819 ehotk->hotplug_slot->private = ehotk;
820 ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
821 ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
822 eeepc_get_adapter_status(ehotk->hotplug_slot,
823 &ehotk->hotplug_slot->info->adapter_status);
824
825 ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi");
826 if (ret) {
827 pr_err("Unable to register hotplug slot - %d\n", ret);
828 goto error_register;
829 }
830
831 return 0;
832
833error_register:
834 kfree(ehotk->hotplug_slot->info);
835error_info:
836 kfree(ehotk->hotplug_slot);
837 ehotk->hotplug_slot = NULL;
838error_slot:
839 return ret;
840}
841
747static int eeepc_hotk_add(struct acpi_device *device) 842static int eeepc_hotk_add(struct acpi_device *device)
748{ 843{
749 int result; 844 int result;
750 845
751 if (!device) 846 if (!device)
752 return -EINVAL; 847 return -EINVAL;
753 printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n"); 848 pr_notice(EEEPC_HOTK_NAME "\n");
754 ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL); 849 ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
755 if (!ehotk) 850 if (!ehotk)
756 return -ENOMEM; 851 return -ENOMEM;
@@ -764,53 +859,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
764 if (result) 859 if (result)
765 goto ehotk_fail; 860 goto ehotk_fail;
766 861
767 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
768 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
769
770 if (get_acpi(CM_ASL_WLAN) != -1) {
771 ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan",
772 &device->dev,
773 RFKILL_TYPE_WLAN,
774 &eeepc_rfkill_ops,
775 (void *)CM_ASL_WLAN);
776
777 if (!ehotk->eeepc_wlan_rfkill)
778 goto wlan_fail;
779
780 rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill,
781 get_acpi(CM_ASL_WLAN) != 1);
782 result = rfkill_register(ehotk->eeepc_wlan_rfkill);
783 if (result)
784 goto wlan_fail;
785 }
786
787 if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
788 ehotk->eeepc_bluetooth_rfkill =
789 rfkill_alloc("eeepc-bluetooth",
790 &device->dev,
791 RFKILL_TYPE_BLUETOOTH,
792 &eeepc_rfkill_ops,
793 (void *)CM_ASL_BLUETOOTH);
794
795 if (!ehotk->eeepc_bluetooth_rfkill)
796 goto bluetooth_fail;
797
798 rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill,
799 get_acpi(CM_ASL_BLUETOOTH) != 1);
800 result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
801 if (result)
802 goto bluetooth_fail;
803 }
804
805 return 0; 862 return 0;
806 863
807 bluetooth_fail:
808 rfkill_destroy(ehotk->eeepc_bluetooth_rfkill);
809 rfkill_unregister(ehotk->eeepc_wlan_rfkill);
810 wlan_fail:
811 rfkill_destroy(ehotk->eeepc_wlan_rfkill);
812 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
813 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
814 ehotk_fail: 864 ehotk_fail:
815 kfree(ehotk); 865 kfree(ehotk);
816 ehotk = NULL; 866 ehotk = NULL;
@@ -823,16 +873,13 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type)
823 if (!device || !acpi_driver_data(device)) 873 if (!device || !acpi_driver_data(device))
824 return -EINVAL; 874 return -EINVAL;
825 875
826 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
827 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
828
829 kfree(ehotk); 876 kfree(ehotk);
830 return 0; 877 return 0;
831} 878}
832 879
833static int eeepc_hotk_resume(struct acpi_device *device) 880static int eeepc_hotk_resume(struct acpi_device *device)
834{ 881{
835 if (ehotk->eeepc_wlan_rfkill) { 882 if (ehotk->wlan_rfkill) {
836 bool wlan; 883 bool wlan;
837 884
838 /* Workaround - it seems that _PTS disables the wireless 885 /* Workaround - it seems that _PTS disables the wireless
@@ -844,14 +891,13 @@ static int eeepc_hotk_resume(struct acpi_device *device)
844 wlan = get_acpi(CM_ASL_WLAN); 891 wlan = get_acpi(CM_ASL_WLAN);
845 set_acpi(CM_ASL_WLAN, wlan); 892 set_acpi(CM_ASL_WLAN, wlan);
846 893
847 rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, 894 rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1);
848 wlan != 1);
849 895
850 eeepc_rfkill_hotplug(); 896 schedule_work(&ehotk->hotplug_work);
851 } 897 }
852 898
853 if (ehotk->eeepc_bluetooth_rfkill) 899 if (ehotk->bluetooth_rfkill)
854 rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill, 900 rfkill_set_sw_state(ehotk->bluetooth_rfkill,
855 get_acpi(CM_ASL_BLUETOOTH) != 1); 901 get_acpi(CM_ASL_BLUETOOTH) != 1);
856 902
857 return 0; 903 return 0;
@@ -973,10 +1019,16 @@ static void eeepc_backlight_exit(void)
973 1019
974static void eeepc_rfkill_exit(void) 1020static void eeepc_rfkill_exit(void)
975{ 1021{
976 if (ehotk->eeepc_wlan_rfkill) 1022 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
977 rfkill_unregister(ehotk->eeepc_wlan_rfkill); 1023 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
978 if (ehotk->eeepc_bluetooth_rfkill) 1024 if (ehotk->wlan_rfkill)
979 rfkill_unregister(ehotk->eeepc_bluetooth_rfkill); 1025 rfkill_unregister(ehotk->wlan_rfkill);
1026 if (ehotk->bluetooth_rfkill)
1027 rfkill_unregister(ehotk->bluetooth_rfkill);
1028 if (ehotk->wwan3g_rfkill)
1029 rfkill_unregister(ehotk->wwan3g_rfkill);
1030 if (ehotk->hotplug_slot)
1031 pci_hp_deregister(ehotk->hotplug_slot);
980} 1032}
981 1033
982static void eeepc_input_exit(void) 1034static void eeepc_input_exit(void)
@@ -1011,6 +1063,77 @@ static void __exit eeepc_laptop_exit(void)
1011 platform_driver_unregister(&platform_driver); 1063 platform_driver_unregister(&platform_driver);
1012} 1064}
1013 1065
1066static int eeepc_new_rfkill(struct rfkill **rfkill,
1067 const char *name, struct device *dev,
1068 enum rfkill_type type, int cm)
1069{
1070 int result;
1071
1072 result = get_acpi(cm);
1073 if (result < 0)
1074 return result;
1075
1076 *rfkill = rfkill_alloc(name, dev, type,
1077 &eeepc_rfkill_ops, (void *)(unsigned long)cm);
1078
1079 if (!*rfkill)
1080 return -EINVAL;
1081
1082 rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1);
1083 result = rfkill_register(*rfkill);
1084 if (result) {
1085 rfkill_destroy(*rfkill);
1086 *rfkill = NULL;
1087 return result;
1088 }
1089 return 0;
1090}
1091
1092
1093static int eeepc_rfkill_init(struct device *dev)
1094{
1095 int result = 0;
1096
1097 INIT_WORK(&ehotk->hotplug_work, eeepc_hotplug_work);
1098
1099 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
1100 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
1101
1102 result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
1103 "eeepc-wlan", dev,
1104 RFKILL_TYPE_WLAN, CM_ASL_WLAN);
1105
1106 if (result && result != -ENODEV)
1107 goto exit;
1108
1109 result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill,
1110 "eeepc-bluetooth", dev,
1111 RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH);
1112
1113 if (result && result != -ENODEV)
1114 goto exit;
1115
1116 result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill,
1117 "eeepc-wwan3g", dev,
1118 RFKILL_TYPE_WWAN, CM_ASL_3G);
1119
1120 if (result && result != -ENODEV)
1121 goto exit;
1122
1123 result = eeepc_setup_pci_hotplug();
1124 /*
1125 * If we get -EBUSY then something else is handling the PCI hotplug -
1126 * don't fail in this case
1127 */
1128 if (result == -EBUSY)
1129 result = 0;
1130
1131exit:
1132 if (result && result != -ENODEV)
1133 eeepc_rfkill_exit();
1134 return result;
1135}
1136
1014static int eeepc_backlight_init(struct device *dev) 1137static int eeepc_backlight_init(struct device *dev)
1015{ 1138{
1016 struct backlight_device *bd; 1139 struct backlight_device *bd;
@@ -1018,8 +1141,7 @@ static int eeepc_backlight_init(struct device *dev)
1018 bd = backlight_device_register(EEEPC_HOTK_FILE, dev, 1141 bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
1019 NULL, &eeepcbl_ops); 1142 NULL, &eeepcbl_ops);
1020 if (IS_ERR(bd)) { 1143 if (IS_ERR(bd)) {
1021 printk(EEEPC_ERR 1144 pr_err("Could not register eeepc backlight device\n");
1022 "Could not register eeepc backlight device\n");
1023 eeepc_backlight_device = NULL; 1145 eeepc_backlight_device = NULL;
1024 return PTR_ERR(bd); 1146 return PTR_ERR(bd);
1025 } 1147 }
@@ -1038,8 +1160,7 @@ static int eeepc_hwmon_init(struct device *dev)
1038 1160
1039 hwmon = hwmon_device_register(dev); 1161 hwmon = hwmon_device_register(dev);
1040 if (IS_ERR(hwmon)) { 1162 if (IS_ERR(hwmon)) {
1041 printk(EEEPC_ERR 1163 pr_err("Could not register eeepc hwmon device\n");
1042 "Could not register eeepc hwmon device\n");
1043 eeepc_hwmon_device = NULL; 1164 eeepc_hwmon_device = NULL;
1044 return PTR_ERR(hwmon); 1165 return PTR_ERR(hwmon);
1045 } 1166 }
@@ -1065,19 +1186,6 @@ static int __init eeepc_laptop_init(void)
1065 acpi_bus_unregister_driver(&eeepc_hotk_driver); 1186 acpi_bus_unregister_driver(&eeepc_hotk_driver);
1066 return -ENODEV; 1187 return -ENODEV;
1067 } 1188 }
1068 dev = acpi_get_physical_device(ehotk->device->handle);
1069
1070 if (!acpi_video_backlight_support()) {
1071 result = eeepc_backlight_init(dev);
1072 if (result)
1073 goto fail_backlight;
1074 } else
1075 printk(EEEPC_INFO "Backlight controlled by ACPI video "
1076 "driver\n");
1077
1078 result = eeepc_hwmon_init(dev);
1079 if (result)
1080 goto fail_hwmon;
1081 1189
1082 eeepc_enable_camera(); 1190 eeepc_enable_camera();
1083 1191
@@ -1097,7 +1205,33 @@ static int __init eeepc_laptop_init(void)
1097 &platform_attribute_group); 1205 &platform_attribute_group);
1098 if (result) 1206 if (result)
1099 goto fail_sysfs; 1207 goto fail_sysfs;
1208
1209 dev = &platform_device->dev;
1210
1211 if (!acpi_video_backlight_support()) {
1212 result = eeepc_backlight_init(dev);
1213 if (result)
1214 goto fail_backlight;
1215 } else
1216 pr_info("Backlight controlled by ACPI video "
1217 "driver\n");
1218
1219 result = eeepc_hwmon_init(dev);
1220 if (result)
1221 goto fail_hwmon;
1222
1223 result = eeepc_rfkill_init(dev);
1224 if (result)
1225 goto fail_rfkill;
1226
1100 return 0; 1227 return 0;
1228fail_rfkill:
1229 eeepc_hwmon_exit();
1230fail_hwmon:
1231 eeepc_backlight_exit();
1232fail_backlight:
1233 sysfs_remove_group(&platform_device->dev.kobj,
1234 &platform_attribute_group);
1101fail_sysfs: 1235fail_sysfs:
1102 platform_device_del(platform_device); 1236 platform_device_del(platform_device);
1103fail_platform_device2: 1237fail_platform_device2:
@@ -1105,12 +1239,7 @@ fail_platform_device2:
1105fail_platform_device1: 1239fail_platform_device1:
1106 platform_driver_unregister(&platform_driver); 1240 platform_driver_unregister(&platform_driver);
1107fail_platform_driver: 1241fail_platform_driver:
1108 eeepc_hwmon_exit();
1109fail_hwmon:
1110 eeepc_backlight_exit();
1111fail_backlight:
1112 eeepc_input_exit(); 1242 eeepc_input_exit();
1113 eeepc_rfkill_exit();
1114 return result; 1243 return result;
1115} 1244}
1116 1245
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 4ac2311c00af..a2ad53e15874 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -171,7 +171,7 @@ static int hp_wmi_tablet_state(void)
171static int hp_wmi_set_block(void *data, bool blocked) 171static int hp_wmi_set_block(void *data, bool blocked)
172{ 172{
173 unsigned long b = (unsigned long) data; 173 unsigned long b = (unsigned long) data;
174 int query = BIT(b + 8) | ((!!blocked) << b); 174 int query = BIT(b + 8) | ((!blocked) << b);
175 175
176 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query); 176 return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query);
177} 177}
@@ -520,11 +520,13 @@ static int hp_wmi_resume_handler(struct platform_device *device)
520 * the input layer will only actually pass it on if the state 520 * the input layer will only actually pass it on if the state
521 * changed. 521 * changed.
522 */ 522 */
523 523 if (hp_wmi_input_dev) {
524 input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); 524 input_report_switch(hp_wmi_input_dev, SW_DOCK,
525 input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, 525 hp_wmi_dock_state());
526 hp_wmi_tablet_state()); 526 input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
527 input_sync(hp_wmi_input_dev); 527 hp_wmi_tablet_state());
528 input_sync(hp_wmi_input_dev);
529 }
528 530
529 return 0; 531 return 0;
530} 532}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a463fd72c495..e85600852502 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -239,12 +239,6 @@ struct ibm_init_struct {
239}; 239};
240 240
241static struct { 241static struct {
242#ifdef CONFIG_THINKPAD_ACPI_BAY
243 u32 bay_status:1;
244 u32 bay_eject:1;
245 u32 bay_status2:1;
246 u32 bay_eject2:1;
247#endif
248 u32 bluetooth:1; 242 u32 bluetooth:1;
249 u32 hotkey:1; 243 u32 hotkey:1;
250 u32 hotkey_mask:1; 244 u32 hotkey_mask:1;
@@ -589,18 +583,6 @@ static int acpi_ec_write(int i, u8 v)
589 return 1; 583 return 1;
590} 584}
591 585
592#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY)
593static int _sta(acpi_handle handle)
594{
595 int status;
596
597 if (!handle || !acpi_evalf(handle, &status, "_STA", "d"))
598 status = 0;
599
600 return status;
601}
602#endif
603
604static int issue_thinkpad_cmos_command(int cmos_cmd) 586static int issue_thinkpad_cmos_command(int cmos_cmd)
605{ 587{
606 if (!cmos_handle) 588 if (!cmos_handle)
@@ -784,6 +766,8 @@ static int dispatch_procfs_write(struct file *file,
784 766
785 if (!ibm || !ibm->write) 767 if (!ibm || !ibm->write)
786 return -EINVAL; 768 return -EINVAL;
769 if (count > PAGE_SIZE - 2)
770 return -EINVAL;
787 771
788 kernbuf = kmalloc(count + 2, GFP_KERNEL); 772 kernbuf = kmalloc(count + 2, GFP_KERNEL);
789 if (!kernbuf) 773 if (!kernbuf)
@@ -4442,293 +4426,6 @@ static struct ibm_struct light_driver_data = {
4442}; 4426};
4443 4427
4444/************************************************************************* 4428/*************************************************************************
4445 * Dock subdriver
4446 */
4447
4448#ifdef CONFIG_THINKPAD_ACPI_DOCK
4449
4450static void dock_notify(struct ibm_struct *ibm, u32 event);
4451static int dock_read(char *p);
4452static int dock_write(char *buf);
4453
4454TPACPI_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
4455 "\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */
4456 "\\_SB.PCI0.PCI1.DOCK", /* all others */
4457 "\\_SB.PCI.ISA.SLCE", /* 570 */
4458 ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
4459
4460/* don't list other alternatives as we install a notify handler on the 570 */
4461TPACPI_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
4462
4463static const struct acpi_device_id ibm_pci_device_ids[] = {
4464 {PCI_ROOT_HID_STRING, 0},
4465 {"", 0},
4466};
4467
4468static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = {
4469 {
4470 .notify = dock_notify,
4471 .handle = &dock_handle,
4472 .type = ACPI_SYSTEM_NOTIFY,
4473 },
4474 {
4475 /* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING.
4476 * We just use it to get notifications of dock hotplug
4477 * in very old thinkpads */
4478 .hid = ibm_pci_device_ids,
4479 .notify = dock_notify,
4480 .handle = &pci_handle,
4481 .type = ACPI_SYSTEM_NOTIFY,
4482 },
4483};
4484
4485static struct ibm_struct dock_driver_data[2] = {
4486 {
4487 .name = "dock",
4488 .read = dock_read,
4489 .write = dock_write,
4490 .acpi = &ibm_dock_acpidriver[0],
4491 },
4492 {
4493 .name = "dock",
4494 .acpi = &ibm_dock_acpidriver[1],
4495 },
4496};
4497
4498#define dock_docked() (_sta(dock_handle) & 1)
4499
4500static int __init dock_init(struct ibm_init_struct *iibm)
4501{
4502 vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n");
4503
4504 TPACPI_ACPIHANDLE_INIT(dock);
4505
4506 vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n",
4507 str_supported(dock_handle != NULL));
4508
4509 return (dock_handle)? 0 : 1;
4510}
4511
4512static int __init dock_init2(struct ibm_init_struct *iibm)
4513{
4514 int dock2_needed;
4515
4516 vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver part 2\n");
4517
4518 if (dock_driver_data[0].flags.acpi_driver_registered &&
4519 dock_driver_data[0].flags.acpi_notify_installed) {
4520 TPACPI_ACPIHANDLE_INIT(pci);
4521 dock2_needed = (pci_handle != NULL);
4522 vdbg_printk(TPACPI_DBG_INIT,
4523 "dock PCI handler for the TP 570 is %s\n",
4524 str_supported(dock2_needed));
4525 } else {
4526 vdbg_printk(TPACPI_DBG_INIT,
4527 "dock subdriver part 2 not required\n");
4528 dock2_needed = 0;
4529 }
4530
4531 return (dock2_needed)? 0 : 1;
4532}
4533
4534static void dock_notify(struct ibm_struct *ibm, u32 event)
4535{
4536 int docked = dock_docked();
4537 int pci = ibm->acpi->hid && ibm->acpi->device &&
4538 acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids);
4539 int data;
4540
4541 if (event == 1 && !pci) /* 570 */
4542 data = 1; /* button */
4543 else if (event == 1 && pci) /* 570 */
4544 data = 3; /* dock */
4545 else if (event == 3 && docked)
4546 data = 1; /* button */
4547 else if (event == 3 && !docked)
4548 data = 2; /* undock */
4549 else if (event == 0 && docked)
4550 data = 3; /* dock */
4551 else {
4552 printk(TPACPI_ERR "unknown dock event %d, status %d\n",
4553 event, _sta(dock_handle));
4554 data = 0; /* unknown */
4555 }
4556 acpi_bus_generate_proc_event(ibm->acpi->device, event, data);
4557 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
4558 dev_name(&ibm->acpi->device->dev),
4559 event, data);
4560}
4561
4562static int dock_read(char *p)
4563{
4564 int len = 0;
4565 int docked = dock_docked();
4566
4567 if (!dock_handle)
4568 len += sprintf(p + len, "status:\t\tnot supported\n");
4569 else if (!docked)
4570 len += sprintf(p + len, "status:\t\tundocked\n");
4571 else {
4572 len += sprintf(p + len, "status:\t\tdocked\n");
4573 len += sprintf(p + len, "commands:\tdock, undock\n");
4574 }
4575
4576 return len;
4577}
4578
4579static int dock_write(char *buf)
4580{
4581 char *cmd;
4582
4583 if (!dock_docked())
4584 return -ENODEV;
4585
4586 while ((cmd = next_cmd(&buf))) {
4587 if (strlencmp(cmd, "undock") == 0) {
4588 if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 0) ||
4589 !acpi_evalf(dock_handle, NULL, "_EJ0", "vd", 1))
4590 return -EIO;
4591 } else if (strlencmp(cmd, "dock") == 0) {
4592 if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 1))
4593 return -EIO;
4594 } else
4595 return -EINVAL;
4596 }
4597
4598 return 0;
4599}
4600
4601#endif /* CONFIG_THINKPAD_ACPI_DOCK */
4602
4603/*************************************************************************
4604 * Bay subdriver
4605 */
4606
4607#ifdef CONFIG_THINKPAD_ACPI_BAY
4608
4609TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
4610 "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */
4611 "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */
4612 "\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */
4613 ); /* A21e, R30, R31 */
4614TPACPI_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */
4615 "_EJ0", /* all others */
4616 ); /* 570,A21e,G4x,R30,R31,R32,R40e,R50e */
4617TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
4618 "\\_SB.PCI0.IDE0.IDEP.IDPS", /* 600e/x, 770e, 770x */
4619 ); /* all others */
4620TPACPI_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */
4621 "_EJ0", /* 770x */
4622 ); /* all others */
4623
4624static int __init bay_init(struct ibm_init_struct *iibm)
4625{
4626 vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n");
4627
4628 TPACPI_ACPIHANDLE_INIT(bay);
4629 if (bay_handle)
4630 TPACPI_ACPIHANDLE_INIT(bay_ej);
4631 TPACPI_ACPIHANDLE_INIT(bay2);
4632 if (bay2_handle)
4633 TPACPI_ACPIHANDLE_INIT(bay2_ej);
4634
4635 tp_features.bay_status = bay_handle &&
4636 acpi_evalf(bay_handle, NULL, "_STA", "qv");
4637 tp_features.bay_status2 = bay2_handle &&
4638 acpi_evalf(bay2_handle, NULL, "_STA", "qv");
4639
4640 tp_features.bay_eject = bay_handle && bay_ej_handle &&
4641 (strlencmp(bay_ej_path, "_EJ0") == 0 || experimental);
4642 tp_features.bay_eject2 = bay2_handle && bay2_ej_handle &&
4643 (strlencmp(bay2_ej_path, "_EJ0") == 0 || experimental);
4644
4645 vdbg_printk(TPACPI_DBG_INIT,
4646 "bay 1: status %s, eject %s; bay 2: status %s, eject %s\n",
4647 str_supported(tp_features.bay_status),
4648 str_supported(tp_features.bay_eject),
4649 str_supported(tp_features.bay_status2),
4650 str_supported(tp_features.bay_eject2));
4651
4652 return (tp_features.bay_status || tp_features.bay_eject ||
4653 tp_features.bay_status2 || tp_features.bay_eject2)? 0 : 1;
4654}
4655
4656static void bay_notify(struct ibm_struct *ibm, u32 event)
4657{
4658 acpi_bus_generate_proc_event(ibm->acpi->device, event, 0);
4659 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
4660 dev_name(&ibm->acpi->device->dev),
4661 event, 0);
4662}
4663
4664#define bay_occupied(b) (_sta(b##_handle) & 1)
4665
4666static int bay_read(char *p)
4667{
4668 int len = 0;
4669 int occupied = bay_occupied(bay);
4670 int occupied2 = bay_occupied(bay2);
4671 int eject, eject2;
4672
4673 len += sprintf(p + len, "status:\t\t%s\n",
4674 tp_features.bay_status ?
4675 (occupied ? "occupied" : "unoccupied") :
4676 "not supported");
4677 if (tp_features.bay_status2)
4678 len += sprintf(p + len, "status2:\t%s\n", occupied2 ?
4679 "occupied" : "unoccupied");
4680
4681 eject = tp_features.bay_eject && occupied;
4682 eject2 = tp_features.bay_eject2 && occupied2;
4683
4684 if (eject && eject2)
4685 len += sprintf(p + len, "commands:\teject, eject2\n");
4686 else if (eject)
4687 len += sprintf(p + len, "commands:\teject\n");
4688 else if (eject2)
4689 len += sprintf(p + len, "commands:\teject2\n");
4690
4691 return len;
4692}
4693
4694static int bay_write(char *buf)
4695{
4696 char *cmd;
4697
4698 if (!tp_features.bay_eject && !tp_features.bay_eject2)
4699 return -ENODEV;
4700
4701 while ((cmd = next_cmd(&buf))) {
4702 if (tp_features.bay_eject && strlencmp(cmd, "eject") == 0) {
4703 if (!acpi_evalf(bay_ej_handle, NULL, NULL, "vd", 1))
4704 return -EIO;
4705 } else if (tp_features.bay_eject2 &&
4706 strlencmp(cmd, "eject2") == 0) {
4707 if (!acpi_evalf(bay2_ej_handle, NULL, NULL, "vd", 1))
4708 return -EIO;
4709 } else
4710 return -EINVAL;
4711 }
4712
4713 return 0;
4714}
4715
4716static struct tp_acpi_drv_struct ibm_bay_acpidriver = {
4717 .notify = bay_notify,
4718 .handle = &bay_handle,
4719 .type = ACPI_SYSTEM_NOTIFY,
4720};
4721
4722static struct ibm_struct bay_driver_data = {
4723 .name = "bay",
4724 .read = bay_read,
4725 .write = bay_write,
4726 .acpi = &ibm_bay_acpidriver,
4727};
4728
4729#endif /* CONFIG_THINKPAD_ACPI_BAY */
4730
4731/*************************************************************************
4732 * CMOS subdriver 4429 * CMOS subdriver
4733 */ 4430 */
4734 4431
@@ -5945,14 +5642,48 @@ static struct backlight_ops ibm_backlight_data = {
5945 5642
5946/* --------------------------------------------------------------------- */ 5643/* --------------------------------------------------------------------- */
5947 5644
5645/*
5646 * These are only useful for models that can have only one type of
5647 * GPU. If the BIOS model handles both ATI and Intel, don't use
5648 * these quirks.
5649 */
5650#define TPACPI_BRGHT_Q_NOEC 0x0001 /* Must NOT use EC HBRV */
5651#define TPACPI_BRGHT_Q_EC 0x0002 /* Should or must use EC HBRV */
5652#define TPACPI_BRGHT_Q_ASK 0x8000 /* Ask for user report */
5653
5654static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
5655 /* Models with ATI GPUs known to require ECNVRAM mode */
5656 TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
5657
5658 /* Models with ATI GPUs (awaiting confirmation) */
5659 TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5660 TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5661 TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5662 TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5663
5664 /* Models with Intel Extreme Graphics 2 (awaiting confirmation) */
5665 TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
5666 TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
5667 TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
5668
5669 /* Models with Intel GMA900 */
5670 TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
5671 TPACPI_Q_IBM('7', '4', TPACPI_BRGHT_Q_NOEC), /* X41 */
5672 TPACPI_Q_IBM('7', '5', TPACPI_BRGHT_Q_NOEC), /* X41 Tablet */
5673};
5674
5948static int __init brightness_init(struct ibm_init_struct *iibm) 5675static int __init brightness_init(struct ibm_init_struct *iibm)
5949{ 5676{
5950 int b; 5677 int b;
5678 unsigned long quirks;
5951 5679
5952 vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n"); 5680 vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n");
5953 5681
5954 mutex_init(&brightness_mutex); 5682 mutex_init(&brightness_mutex);
5955 5683
5684 quirks = tpacpi_check_quirks(brightness_quirk_table,
5685 ARRAY_SIZE(brightness_quirk_table));
5686
5956 /* 5687 /*
5957 * We always attempt to detect acpi support, so as to switch 5688 * We always attempt to detect acpi support, so as to switch
5958 * Lenovo Vista BIOS to ACPI brightness mode even if we are not 5689 * Lenovo Vista BIOS to ACPI brightness mode even if we are not
@@ -6009,23 +5740,13 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6009 /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */ 5740 /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */
6010 if (brightness_mode == TPACPI_BRGHT_MODE_AUTO || 5741 if (brightness_mode == TPACPI_BRGHT_MODE_AUTO ||
6011 brightness_mode == TPACPI_BRGHT_MODE_MAX) { 5742 brightness_mode == TPACPI_BRGHT_MODE_MAX) {
6012 if (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) { 5743 if (quirks & TPACPI_BRGHT_Q_EC)
6013 /* 5744 brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM;
6014 * IBM models that define HBRV probably have 5745 else
6015 * EC-based backlight level control
6016 */
6017 if (acpi_evalf(ec_handle, NULL, "HBRV", "qd"))
6018 /* T40-T43, R50-R52, R50e, R51e, X31-X41 */
6019 brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM;
6020 else
6021 /* all other IBM ThinkPads */
6022 brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP;
6023 } else
6024 /* All Lenovo ThinkPads */
6025 brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP; 5746 brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP;
6026 5747
6027 dbg_printk(TPACPI_DBG_BRGHT, 5748 dbg_printk(TPACPI_DBG_BRGHT,
6028 "selected brightness_mode=%d\n", 5749 "driver auto-selected brightness_mode=%d\n",
6029 brightness_mode); 5750 brightness_mode);
6030 } 5751 }
6031 5752
@@ -6052,6 +5773,15 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6052 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, 5773 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
6053 "brightness is supported\n"); 5774 "brightness is supported\n");
6054 5775
5776 if (quirks & TPACPI_BRGHT_Q_ASK) {
5777 printk(TPACPI_NOTICE
5778 "brightness: will use unverified default: "
5779 "brightness_mode=%d\n", brightness_mode);
5780 printk(TPACPI_NOTICE
5781 "brightness: please report to %s whether it works well "
5782 "or not on your ThinkPad\n", TPACPI_MAIL);
5783 }
5784
6055 ibm_backlight_device->props.max_brightness = 5785 ibm_backlight_device->props.max_brightness =
6056 (tp_features.bright_16levels)? 15 : 7; 5786 (tp_features.bright_16levels)? 15 : 7;
6057 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; 5787 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
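
The quirk table and the brightness_init() changes above replace the old per-vendor HBRV probing with a lookup keyed on the two-character EC firmware ID: TPACPI_BRGHT_Q_EC selects ECNVRAM mode, otherwise UCMS_STEP is used, and TPACPI_BRGHT_Q_ASK only adds the "please report" notice. Below is a minimal user-space sketch of that kind of quirk lookup; the struct, the IDs and check_quirks() are illustrative, not the driver's actual tpacpi_check_quirks() implementation.

#include <stdio.h>
#include <stddef.h>

#define BRGHT_Q_NOEC 0x0001  /* must not use the EC register */
#define BRGHT_Q_EC   0x0002  /* should or must use the EC register */
#define BRGHT_Q_ASK  0x8000  /* ask the user to report back */

struct quirk {
	char id0, id1;        /* two-character EC firmware ID */
	unsigned long flags;  /* quirk bits that apply to this model */
};

static const struct quirk quirk_table[] = {
	{ '1', 'Y', BRGHT_Q_EC },               /* EC-backed backlight */
	{ '7', '0', BRGHT_Q_NOEC },             /* ACPI/UCMS backlight */
	{ '1', 'R', BRGHT_Q_ASK | BRGHT_Q_EC }, /* unconfirmed */
};

/* Return the OR of all flags whose ID matches, 0 if no entry matches. */
static unsigned long check_quirks(char id0, char id1)
{
	unsigned long flags = 0;
	size_t i;

	for (i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++)
		if (quirk_table[i].id0 == id0 && quirk_table[i].id1 == id1)
			flags |= quirk_table[i].flags;
	return flags;
}

int main(void)
{
	unsigned long q = check_quirks('1', 'Y');

	printf("mode: %s%s\n", (q & BRGHT_Q_EC) ? "ECNVRAM" : "UCMS_STEP",
	       (q & BRGHT_Q_ASK) ? " (unverified, please report)" : "");
	return 0;
}
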
@@ -7854,22 +7584,6 @@ static struct ibm_init_struct ibms_init[] __initdata = {
7854 .init = light_init, 7584 .init = light_init,
7855 .data = &light_driver_data, 7585 .data = &light_driver_data,
7856 }, 7586 },
7857#ifdef CONFIG_THINKPAD_ACPI_DOCK
7858 {
7859 .init = dock_init,
7860 .data = &dock_driver_data[0],
7861 },
7862 {
7863 .init = dock_init2,
7864 .data = &dock_driver_data[1],
7865 },
7866#endif
7867#ifdef CONFIG_THINKPAD_ACPI_BAY
7868 {
7869 .init = bay_init,
7870 .data = &bay_driver_data,
7871 },
7872#endif
7873 { 7587 {
7874 .init = cmos_init, 7588 .init = cmos_init,
7875 .data = &cmos_driver_data, 7589 .data = &cmos_driver_data,
@@ -7968,12 +7682,6 @@ TPACPI_PARAM(hotkey);
7968TPACPI_PARAM(bluetooth); 7682TPACPI_PARAM(bluetooth);
7969TPACPI_PARAM(video); 7683TPACPI_PARAM(video);
7970TPACPI_PARAM(light); 7684TPACPI_PARAM(light);
7971#ifdef CONFIG_THINKPAD_ACPI_DOCK
7972TPACPI_PARAM(dock);
7973#endif
7974#ifdef CONFIG_THINKPAD_ACPI_BAY
7975TPACPI_PARAM(bay);
7976#endif /* CONFIG_THINKPAD_ACPI_BAY */
7977TPACPI_PARAM(cmos); 7685TPACPI_PARAM(cmos);
7978TPACPI_PARAM(led); 7686TPACPI_PARAM(led);
7979TPACPI_PARAM(beep); 7687TPACPI_PARAM(beep);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7eda34838bfe..bdbc4f73fcdc 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -43,6 +43,13 @@ config BATTERY_DS2760
43 help 43 help
44 Say Y here to enable support for batteries with ds2760 chip. 44 Say Y here to enable support for batteries with ds2760 chip.
45 45
46config BATTERY_DS2782
47 tristate "DS2782 standalone gas-gauge"
48 depends on I2C
49 help
50 Say Y here to enable support for the DS2782 standalone battery
51 gas-gauge.
52
46config BATTERY_PMU 53config BATTERY_PMU
47 tristate "Apple PMU battery" 54 tristate "Apple PMU battery"
48 depends on PPC32 && ADB_PMU 55 depends on PPC32 && ADB_PMU
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index daf3179689aa..380d17c9ae29 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_APM_POWER) += apm_power.o
19obj-$(CONFIG_WM8350_POWER) += wm8350_power.o 19obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
20 20
21obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 21obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
22obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
22obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 23obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
23obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 24obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
24obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o 25obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
new file mode 100644
index 000000000000..da14f374cb60
--- /dev/null
+++ b/drivers/power/ds2782_battery.c
@@ -0,0 +1,330 @@
1/*
2 * I2C client/driver for the Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC
3 *
4 * Copyright (C) 2009 Bluewater Systems Ltd
5 *
6 * Author: Ryan Mallon <ryan@bluewatersys.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/swab.h>
19#include <linux/i2c.h>
20#include <linux/idr.h>
21#include <linux/power_supply.h>
22
23#define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */
24
25#define DS2782_REG_VOLT_MSB 0x0c
26#define DS2782_REG_TEMP_MSB 0x0a
27#define DS2782_REG_CURRENT_MSB 0x0e
28
29/* EEPROM Block */
30#define DS2782_REG_RSNSP 0x69 /* Sense resistor value */
31
32/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
33#define DS2782_CURRENT_UNITS 1563
34
35#define to_ds2782_info(x) container_of(x, struct ds2782_info, battery)
36
37struct ds2782_info {
38 struct i2c_client *client;
39 struct power_supply battery;
40 int id;
41};
42
43static DEFINE_IDR(battery_id);
44static DEFINE_MUTEX(battery_lock);
45
46static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val)
47{
48 int ret;
49
50 ret = i2c_smbus_read_byte_data(info->client, reg);
51 if (ret < 0) {
52 dev_err(&info->client->dev, "register read failed\n");
53 return ret;
54 }
55
56 *val = ret;
57 return 0;
58}
59
60static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb,
61 s16 *val)
62{
63 int ret;
64
 65	ret = i2c_smbus_read_word_data(info->client, reg_msb);
 66	if (ret < 0) {
 67		dev_err(&info->client->dev, "register read failed\n");
 68		return ret;
 69	}
 70
 71	*val = swab16(ret);
72 return 0;
73}
74
75static int ds2782_get_temp(struct ds2782_info *info, int *temp)
76{
77 s16 raw;
78 int err;
79
80 /*
 81	 * Temperature is measured in units of 0.125 degrees celsius, while the
82 * power_supply class measures temperature in tenths of degrees
83 * celsius. The temperature value is stored as a 10 bit number, plus
84 * sign in the upper bits of a 16 bit register.
85 */
86 err = ds2782_read_reg16(info, DS2782_REG_TEMP_MSB, &raw);
87 if (err)
88 return err;
89 *temp = ((raw / 32) * 125) / 100;
90 return 0;
91}
92
93static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
94{
95 int sense_res;
96 int err;
97 u8 sense_res_raw;
98 s16 raw;
99
100 /*
101 * The units of measurement for current are dependent on the value of
102 * the sense resistor.
103 */
104 err = ds2782_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw);
105 if (err)
106 return err;
107 if (sense_res_raw == 0) {
108 dev_err(&info->client->dev, "sense resistor value is 0\n");
109 return -ENXIO;
110 }
111 sense_res = 1000 / sense_res_raw;
112
113 dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n",
114 sense_res);
115 err = ds2782_read_reg16(info, DS2782_REG_CURRENT_MSB, &raw);
116 if (err)
117 return err;
118 *current_uA = raw * (DS2782_CURRENT_UNITS / sense_res);
119 return 0;
120}
121
122static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uV)
123{
124 s16 raw;
125 int err;
126
127 /*
128 * Voltage is measured in units of 4.88mV. The voltage is stored as
129 * a 10-bit number plus sign, in the upper bits of a 16-bit register
130 */
131 err = ds2782_read_reg16(info, DS2782_REG_VOLT_MSB, &raw);
132 if (err)
133 return err;
134	*voltage_uV = (raw / 32) * 4880;
135 return 0;
136}
137
138static int ds2782_get_capacity(struct ds2782_info *info, int *capacity)
139{
140 int err;
141 u8 raw;
142
143 err = ds2782_read_reg(info, DS2782_REG_RARC, &raw);
144 if (err)
145 return err;
146 *capacity = raw;
147	return 0;
148}
149
150static int ds2782_get_status(struct ds2782_info *info, int *status)
151{
152 int err;
153 int current_uA;
154 int capacity;
155
156 err = ds2782_get_current(info, &current_uA);
157 if (err)
158 return err;
159
160 err = ds2782_get_capacity(info, &capacity);
161 if (err)
162 return err;
163
164 if (capacity == 100)
165 *status = POWER_SUPPLY_STATUS_FULL;
166 else if (current_uA == 0)
167 *status = POWER_SUPPLY_STATUS_NOT_CHARGING;
168 else if (current_uA < 0)
169 *status = POWER_SUPPLY_STATUS_DISCHARGING;
170 else
171 *status = POWER_SUPPLY_STATUS_CHARGING;
172
173 return 0;
174}
175
176static int ds2782_battery_get_property(struct power_supply *psy,
177 enum power_supply_property prop,
178 union power_supply_propval *val)
179{
180 struct ds2782_info *info = to_ds2782_info(psy);
181 int ret;
182
183 switch (prop) {
184 case POWER_SUPPLY_PROP_STATUS:
185 ret = ds2782_get_status(info, &val->intval);
186 break;
187
188 case POWER_SUPPLY_PROP_CAPACITY:
189 ret = ds2782_get_capacity(info, &val->intval);
190 break;
191
192 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
193 ret = ds2782_get_voltage(info, &val->intval);
194 break;
195
196 case POWER_SUPPLY_PROP_CURRENT_NOW:
197 ret = ds2782_get_current(info, &val->intval);
198 break;
199
200 case POWER_SUPPLY_PROP_TEMP:
201 ret = ds2782_get_temp(info, &val->intval);
202 break;
203
204 default:
205 ret = -EINVAL;
206 }
207
208 return ret;
209}
210
211static enum power_supply_property ds2782_battery_props[] = {
212 POWER_SUPPLY_PROP_STATUS,
213 POWER_SUPPLY_PROP_CAPACITY,
214 POWER_SUPPLY_PROP_VOLTAGE_NOW,
215 POWER_SUPPLY_PROP_CURRENT_NOW,
216 POWER_SUPPLY_PROP_TEMP,
217};
218
219static void ds2782_power_supply_init(struct power_supply *battery)
220{
221 battery->type = POWER_SUPPLY_TYPE_BATTERY;
222 battery->properties = ds2782_battery_props;
223 battery->num_properties = ARRAY_SIZE(ds2782_battery_props);
224 battery->get_property = ds2782_battery_get_property;
225 battery->external_power_changed = NULL;
226}
227
228static int ds2782_battery_remove(struct i2c_client *client)
229{
230 struct ds2782_info *info = i2c_get_clientdata(client);
231
232 power_supply_unregister(&info->battery);
233 kfree(info->battery.name);
234
235 mutex_lock(&battery_lock);
236 idr_remove(&battery_id, info->id);
237 mutex_unlock(&battery_lock);
238
239	i2c_set_clientdata(client, NULL);
240
241 kfree(info);
242 return 0;
243}
244
245static int ds2782_battery_probe(struct i2c_client *client,
246 const struct i2c_device_id *id)
247{
248 struct ds2782_info *info;
249 int ret;
250 int num;
251
252 /* Get an ID for this battery */
253 ret = idr_pre_get(&battery_id, GFP_KERNEL);
254 if (ret == 0) {
255 ret = -ENOMEM;
256 goto fail_id;
257 }
258
259 mutex_lock(&battery_lock);
260 ret = idr_get_new(&battery_id, client, &num);
261 mutex_unlock(&battery_lock);
262 if (ret < 0)
263 goto fail_id;
264
265 info = kzalloc(sizeof(*info), GFP_KERNEL);
266 if (!info) {
267 ret = -ENOMEM;
268 goto fail_info;
269 }
270
271 info->battery.name = kasprintf(GFP_KERNEL, "ds2782-%d", num);
272 if (!info->battery.name) {
273 ret = -ENOMEM;
274 goto fail_name;
275 }
276
277 i2c_set_clientdata(client, info);
278 info->client = client;
279 ds2782_power_supply_init(&info->battery);
280
281 ret = power_supply_register(&client->dev, &info->battery);
282 if (ret) {
283 dev_err(&client->dev, "failed to register battery\n");
284 goto fail_register;
285 }
286
287 return 0;
288
289fail_register:
290 kfree(info->battery.name);
291fail_name:
292	i2c_set_clientdata(client, NULL);
293 kfree(info);
294fail_info:
295 mutex_lock(&battery_lock);
296 idr_remove(&battery_id, num);
297 mutex_unlock(&battery_lock);
298fail_id:
299 return ret;
300}
301
302static const struct i2c_device_id ds2782_id[] = {
303 {"ds2782", 0},
304 {},
305};
306
307static struct i2c_driver ds2782_battery_driver = {
308 .driver = {
309 .name = "ds2782-battery",
310 },
311 .probe = ds2782_battery_probe,
312 .remove = ds2782_battery_remove,
313 .id_table = ds2782_id,
314};
315
316static int __init ds2782_init(void)
317{
318 return i2c_add_driver(&ds2782_battery_driver);
319}
320module_init(ds2782_init);
321
322static void __exit ds2782_exit(void)
323{
324 i2c_del_driver(&ds2782_battery_driver);
325}
326module_exit(ds2782_exit);
327
328MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>");
329MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver");
330MODULE_LICENSE("GPL");
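
The conversion comments in ds2782_get_temp(), ds2782_get_current() and ds2782_get_voltage() above can be checked with plain integer arithmetic. The user-space sketch below reproduces those conversions for illustrative register values; the sample readings, the 10 milli-ohm sense resistor and the 4880 uV-per-LSB step (4.88 mV, as stated in the comment above) are assumptions for illustration, not measurements.

#include <stdio.h>
#include <stdint.h>

/* Same scaling constant as the driver above. */
#define CURRENT_UNITS 1563  /* uA per LSB with a 1 milli-ohm sense resistor */

int main(void)
{
	int16_t temp_reg = 0x1900;   /* assumed raw TEMP register (MSB-aligned) */
	int16_t volt_reg = 0x5E80;   /* assumed raw VOLT register */
	int16_t curr_reg = 320;      /* assumed raw CURRENT register */
	int rsnsp = 100;             /* RSNSP = 1/Rsns in ohms -> 10 milli-ohm */

	/* 0.125 C per unit, reported in tenths of a degree */
	int temp = ((temp_reg / 32) * 125) / 100;

	/* 4.88 mV per unit, reported in microvolts */
	int volt_uV = (volt_reg / 32) * 4880;

	/* the current LSB scales with the sense resistor value */
	int sense_mohm = 1000 / rsnsp;
	int curr_uA = curr_reg * (CURRENT_UNITS / sense_mohm);

	printf("temp = %d (tenths of a degree C)\n", temp);   /* 250 -> 25.0 C  */
	printf("volt = %d uV\n", volt_uV);                    /* ~3.69 V        */
	printf("curr = %d uA\n", curr_uA);                    /* ~50 mA         */
	return 0;
}
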
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 5fbca2681baa..58e419299cd6 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/kernel.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/err.h> 13#include <linux/err.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
@@ -35,6 +36,7 @@
35#define BAT_STAT_AC 0x10 36#define BAT_STAT_AC 0x10
36#define BAT_STAT_CHARGING 0x20 37#define BAT_STAT_CHARGING 0x20
37#define BAT_STAT_DISCHARGING 0x40 38#define BAT_STAT_DISCHARGING 0x40
39#define BAT_STAT_TRICKLE 0x80
38 40
39#define BAT_ERR_INFOFAIL 0x02 41#define BAT_ERR_INFOFAIL 0x02
40#define BAT_ERR_OVERVOLTAGE 0x04 42#define BAT_ERR_OVERVOLTAGE 0x04
@@ -89,7 +91,7 @@ static char bat_serial[17]; /* Ick */
89static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte) 91static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte)
90{ 92{
91 if (olpc_platform_info.ecver > 0x44) { 93 if (olpc_platform_info.ecver > 0x44) {
92 if (ec_byte & BAT_STAT_CHARGING) 94 if (ec_byte & (BAT_STAT_CHARGING | BAT_STAT_TRICKLE))
93 val->intval = POWER_SUPPLY_STATUS_CHARGING; 95 val->intval = POWER_SUPPLY_STATUS_CHARGING;
94 else if (ec_byte & BAT_STAT_DISCHARGING) 96 else if (ec_byte & BAT_STAT_DISCHARGING)
95 val->intval = POWER_SUPPLY_STATUS_DISCHARGING; 97 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
@@ -219,7 +221,8 @@ static int olpc_bat_get_property(struct power_supply *psy,
219 It doesn't matter though -- the EC will return the last-known 221 It doesn't matter though -- the EC will return the last-known
220 information, and it's as if we just ran that _little_ bit faster 222 information, and it's as if we just ran that _little_ bit faster
221 and managed to read it out before the battery went away. */ 223 and managed to read it out before the battery went away. */
222 if (!(ec_byte & BAT_STAT_PRESENT) && psp != POWER_SUPPLY_PROP_PRESENT) 224 if (!(ec_byte & (BAT_STAT_PRESENT | BAT_STAT_TRICKLE)) &&
225 psp != POWER_SUPPLY_PROP_PRESENT)
223 return -ENODEV; 226 return -ENODEV;
224 227
225 switch (psp) { 228 switch (psp) {
@@ -229,7 +232,8 @@ static int olpc_bat_get_property(struct power_supply *psy,
229 return ret; 232 return ret;
230 break; 233 break;
231 case POWER_SUPPLY_PROP_PRESENT: 234 case POWER_SUPPLY_PROP_PRESENT:
232 val->intval = !!(ec_byte & BAT_STAT_PRESENT); 235 val->intval = !!(ec_byte & (BAT_STAT_PRESENT |
236 BAT_STAT_TRICKLE));
233 break; 237 break;
234 238
235 case POWER_SUPPLY_PROP_HEALTH: 239 case POWER_SUPPLY_PROP_HEALTH:
@@ -334,21 +338,21 @@ static ssize_t olpc_bat_eeprom_read(struct kobject *kobj,
334 struct bin_attribute *attr, char *buf, loff_t off, size_t count) 338 struct bin_attribute *attr, char *buf, loff_t off, size_t count)
335{ 339{
336 uint8_t ec_byte; 340 uint8_t ec_byte;
337 int ret, end; 341 int ret;
342 int i;
338 343
339 if (off >= EEPROM_SIZE) 344 if (off >= EEPROM_SIZE)
340 return 0; 345 return 0;
341 if (off + count > EEPROM_SIZE) 346 if (off + count > EEPROM_SIZE)
342 count = EEPROM_SIZE - off; 347 count = EEPROM_SIZE - off;
343 348
344 end = EEPROM_START + off + count; 349 for (i = 0; i < count; i++) {
345 for (ec_byte = EEPROM_START + off; ec_byte < end; ec_byte++) { 350 ec_byte = EEPROM_START + off + i;
346 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, 351 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &buf[i], 1);
347 &buf[ec_byte - EEPROM_START], 1);
348 if (ret) { 352 if (ret) {
349 printk(KERN_ERR "olpc-battery: EC command " 353 pr_err("olpc-battery: "
350 "EC_BAT_EEPROM @ 0x%x failed -" 354 "EC_BAT_EEPROM cmd @ 0x%x failed - %d!\n",
351 " %d!\n", ec_byte, ret); 355 ec_byte, ret);
352 return -EIO; 356 return -EIO;
353 } 357 }
354 } 358 }
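
The rewritten olpc_bat_eeprom_read() above clamps the requested window to the EEPROM size and then issues one EC command per byte. Below is a user-space sketch of the same clamp-then-copy-per-byte pattern; read_byte(), EEPROM_SIZE and the backing array are illustrative stand-ins for the EC transaction.

#include <stdio.h>
#include <string.h>

#define EEPROM_SIZE 0x80

/* Stand-in for the one-byte EC transaction used by the driver above. */
static unsigned char eeprom[EEPROM_SIZE];

static int read_byte(unsigned long addr, unsigned char *out)
{
	if (addr >= EEPROM_SIZE)
		return -1;
	*out = eeprom[addr];
	return 0;
}

/* sysfs-style binary read: clamp the window, then fetch byte by byte. */
static long eeprom_read(unsigned char *buf, unsigned long off,
			unsigned long count)
{
	unsigned long i;

	if (off >= EEPROM_SIZE)
		return 0;                    /* past the end: nothing to read */
	if (off + count > EEPROM_SIZE)
		count = EEPROM_SIZE - off;   /* shrink to what actually exists */

	for (i = 0; i < count; i++)
		if (read_byte(off + i, &buf[i]))
			return -1;
	return (long)count;
}

int main(void)
{
	unsigned char buf[16];

	memset(eeprom, 0xAB, sizeof(eeprom));
	printf("read %ld bytes\n", eeprom_read(buf, 0x78, sizeof(buf)));
	return 0;
}
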
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 8bde92126d34..b787335a8419 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -33,14 +33,14 @@ static enum power_supply_property *prop;
33 33
34static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) 34static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
35{ 35{
36 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, 36 return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent),
37 pdata->batt_aux) * pdata->batt_mult / 37 pdata->batt_aux) * pdata->batt_mult /
38 pdata->batt_div; 38 pdata->batt_div;
39} 39}
40 40
41static unsigned long wm97xx_read_temp(struct power_supply *bat_ps) 41static unsigned long wm97xx_read_temp(struct power_supply *bat_ps)
42{ 42{
43 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, 43 return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent),
44 pdata->temp_aux) * pdata->temp_mult / 44 pdata->temp_aux) * pdata->temp_mult /
45 pdata->temp_div; 45 pdata->temp_div;
46} 46}
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index aafd3e6ebb0d..a118eb0f1e67 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Blackfin On-Chip Real Time Clock Driver 2 * Blackfin On-Chip Real Time Clock Driver
3 * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789] 3 * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
4 * 4 *
5 * Copyright 2004-2008 Analog Devices Inc. 5 * Copyright 2004-2009 Analog Devices Inc.
6 * 6 *
7 * Enter bugs at http://blackfin.uclinux.org/ 7 * Enter bugs at http://blackfin.uclinux.org/
8 * 8 *
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
363 struct bfin_rtc *rtc; 363 struct bfin_rtc *rtc;
364 struct device *dev = &pdev->dev; 364 struct device *dev = &pdev->dev;
365 int ret = 0; 365 int ret = 0;
366 unsigned long timeout; 366 unsigned long timeout = jiffies + HZ;
367 367
368 dev_dbg_stamp(dev); 368 dev_dbg_stamp(dev);
369 369
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
374 platform_set_drvdata(pdev, rtc); 374 platform_set_drvdata(pdev, rtc);
375 device_init_wakeup(dev, 1); 375 device_init_wakeup(dev, 1);
376 376
377 /* Register our RTC with the RTC framework */
378 rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops,
379 THIS_MODULE);
380 if (unlikely(IS_ERR(rtc->rtc_dev))) {
381 ret = PTR_ERR(rtc->rtc_dev);
382 goto err;
383 }
384
377 /* Grab the IRQ and init the hardware */ 385 /* Grab the IRQ and init the hardware */
378 ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev); 386 ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
379 if (unlikely(ret)) 387 if (unlikely(ret))
380 goto err; 388 goto err_reg;
381 /* sometimes the bootloader touched things, but the write complete was not 389 /* sometimes the bootloader touched things, but the write complete was not
382 * enabled, so let's just do a quick timeout here since the IRQ will not fire ... 390 * enabled, so let's just do a quick timeout here since the IRQ will not fire ...
383 */ 391 */
384 timeout = jiffies + HZ;
385 while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) 392 while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
386 if (time_after(jiffies, timeout)) 393 if (time_after(jiffies, timeout))
387 break; 394 break;
388 bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); 395 bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
389 bfin_write_RTC_SWCNT(0); 396 bfin_write_RTC_SWCNT(0);
390 397
391 /* Register our RTC with the RTC framework */
392 rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
393 if (unlikely(IS_ERR(rtc->rtc_dev))) {
394 ret = PTR_ERR(rtc->rtc_dev);
395 goto err_irq;
396 }
397
398 return 0; 398 return 0;
399 399
400 err_irq: 400err_reg:
401 free_irq(IRQ_RTC, dev); 401 rtc_device_unregister(rtc->rtc_dev);
402 err: 402err:
403 kfree(rtc); 403 kfree(rtc);
404 return ret; 404 return ret;
405} 405}
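
The rtc-bfin probe hunk above registers the RTC device before requesting the shared interrupt, so the handler can never run against an unregistered device, and the error labels now unwind in reverse order of setup. A minimal sketch of that acquire-in-order / release-in-reverse shape follows; the setup functions are placeholders, not kernel APIs.

#include <stdio.h>

/* Placeholder setup/teardown steps standing in for
 * rtc_device_register()/request_irq() in the probe above. */
static int register_device(void) { puts("register device"); return 0; }
static void unregister_device(void) { puts("unregister device"); }
static int grab_irq(void) { puts("grab irq"); return 0; }
static void release_irq(void) { puts("release irq"); }

static int probe(void)
{
	int ret;

	ret = register_device();   /* step 1: make the device visible */
	if (ret)
		goto err;

	ret = grab_irq();          /* step 2: only now allow interrupts */
	if (ret)
		goto err_unregister;   /* undo step 1 */

	return 0;

err_unregister:
	unregister_device();
err:
	return ret;
}

static void driver_remove(void)
{
	release_irq();             /* tear down in reverse order */
	unregister_device();
}

int main(void)
{
	if (!probe())
		driver_remove();
	return 0;
}
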
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 23e10b6263d6..f7a4701bf863 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1174,23 +1174,34 @@ static struct platform_driver cmos_platform_driver = {
1174 } 1174 }
1175}; 1175};
1176 1176
1177#ifdef CONFIG_PNP
1178static bool pnp_driver_registered;
1179#endif
1180static bool platform_driver_registered;
1181
1177static int __init cmos_init(void) 1182static int __init cmos_init(void)
1178{ 1183{
1179 int retval = 0; 1184 int retval = 0;
1180 1185
1181#ifdef CONFIG_PNP 1186#ifdef CONFIG_PNP
1182 pnp_register_driver(&cmos_pnp_driver); 1187 retval = pnp_register_driver(&cmos_pnp_driver);
1188 if (retval == 0)
1189 pnp_driver_registered = true;
1183#endif 1190#endif
1184 1191
1185 if (!cmos_rtc.dev) 1192 if (!cmos_rtc.dev) {
1186 retval = platform_driver_probe(&cmos_platform_driver, 1193 retval = platform_driver_probe(&cmos_platform_driver,
1187 cmos_platform_probe); 1194 cmos_platform_probe);
1195 if (retval == 0)
1196 platform_driver_registered = true;
1197 }
1188 1198
1189 if (retval == 0) 1199 if (retval == 0)
1190 return 0; 1200 return 0;
1191 1201
1192#ifdef CONFIG_PNP 1202#ifdef CONFIG_PNP
1193 pnp_unregister_driver(&cmos_pnp_driver); 1203 if (pnp_driver_registered)
1204 pnp_unregister_driver(&cmos_pnp_driver);
1194#endif 1205#endif
1195 return retval; 1206 return retval;
1196} 1207}
@@ -1199,9 +1210,11 @@ module_init(cmos_init);
1199static void __exit cmos_exit(void) 1210static void __exit cmos_exit(void)
1200{ 1211{
1201#ifdef CONFIG_PNP 1212#ifdef CONFIG_PNP
1202 pnp_unregister_driver(&cmos_pnp_driver); 1213 if (pnp_driver_registered)
1214 pnp_unregister_driver(&cmos_pnp_driver);
1203#endif 1215#endif
1204 platform_driver_unregister(&cmos_platform_driver); 1216 if (platform_driver_registered)
1217 platform_driver_unregister(&cmos_platform_driver);
1205} 1218}
1206module_exit(cmos_exit); 1219module_exit(cmos_exit);
1207 1220
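
The cmos_init()/cmos_exit() hunks above record which drivers actually registered so the exit and error paths only unregister what succeeded. A small sketch of that "remember what you registered" pattern with stand-in register/unregister calls; the names are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for pnp_register_driver()/platform_driver_probe(). */
static int register_pnp(void) { return 0; }       /* pretend it succeeds */
static int register_platform(void) { return -1; } /* pretend it fails */
static void unregister_pnp(void) { puts("unregister pnp"); }
static void unregister_platform(void) { puts("unregister platform"); }

static bool pnp_registered;
static bool platform_registered;

static int init(void)
{
	int ret;

	ret = register_pnp();
	if (ret == 0)
		pnp_registered = true;

	ret = register_platform();
	if (ret == 0)
		platform_registered = true;

	if (ret == 0)
		return 0;

	/* init failed: back out only the registration that succeeded */
	if (pnp_registered)
		unregister_pnp();
	return ret;
}

static void exit_module(void)
{
	if (pnp_registered)
		unregister_pnp();
	if (platform_registered)
		unregister_platform();
}

int main(void)
{
	if (init() == 0)
		exit_module();
	return 0;
}
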
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 32b27739ec2a..713f7bf5afb3 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -283,7 +283,7 @@ static void ds1374_work(struct work_struct *work)
283 283
284 stat = i2c_smbus_read_byte_data(client, DS1374_REG_SR); 284 stat = i2c_smbus_read_byte_data(client, DS1374_REG_SR);
285 if (stat < 0) 285 if (stat < 0)
286 return; 286 goto unlock;
287 287
288 if (stat & DS1374_REG_SR_AF) { 288 if (stat & DS1374_REG_SR_AF) {
289 stat &= ~DS1374_REG_SR_AF; 289 stat &= ~DS1374_REG_SR_AF;
@@ -302,7 +302,7 @@ static void ds1374_work(struct work_struct *work)
302out: 302out:
303 if (!ds1374->exiting) 303 if (!ds1374->exiting)
304 enable_irq(client->irq); 304 enable_irq(client->irq);
305 305unlock:
306 mutex_unlock(&ds1374->mutex); 306 mutex_unlock(&ds1374->mutex);
307} 307}
308 308
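
The ds1374_work() fix above replaces an early return that leaked the mutex with a jump to a common unlock label. A compact sketch of that single-unlock-exit shape; the lock and the status read are placeholders.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int read_status(void) { return -1; } /* pretend the read failed */

static void work(void)
{
	int stat;

	pthread_mutex_lock(&lock);

	stat = read_status();
	if (stat < 0)
		goto unlock;   /* error: still release the lock below */

	printf("status 0x%x\n", stat);

unlock:
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	work();
	return 0;
}
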
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index f11297aff854..2c839d0d21bd 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for NEC VR4100 series Real Time Clock unit. 2 * Driver for NEC VR4100 series Real Time Clock unit.
3 * 3 *
4 * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -33,7 +33,7 @@
33#include <asm/io.h> 33#include <asm/io.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35 35
36MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 36MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
37MODULE_DESCRIPTION("NEC VR4100 series RTC driver"); 37MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
38MODULE_LICENSE("GPL v2"); 38MODULE_LICENSE("GPL v2");
39 39
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index f8b1f04f26b8..c11770f5b368 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1696,8 +1696,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1696 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1696 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1697 "unsolicited interrupt received " 1697 "unsolicited interrupt received "
1698 "(sense available)"); 1698 "(sense available)");
1699 device->discipline->dump_sense_dbf(device, NULL, irb, 1699 device->discipline->dump_sense_dbf(device, irb, "unsolicited");
1700 "unsolicited");
1701 } 1700 }
1702 1701
1703 dasd_schedule_device_bh(device); 1702 dasd_schedule_device_bh(device);
@@ -2941,42 +2940,20 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
2941} 2940}
2942 2941
2943static void 2942static void
2944dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req, 2943dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
2945 struct irb *irb, char *reason) 2944 char *reason)
2946{ 2945{
2947 u64 *sense; 2946 u64 *sense;
2948 int sl;
2949 struct tsb *tsb;
2950 2947
2951 sense = NULL; 2948 sense = (u64 *) dasd_get_sense(irb);
2952 tsb = NULL;
2953 if (req && scsw_is_tm(&req->irb.scsw)) {
2954 if (irb->scsw.tm.tcw)
2955 tsb = tcw_get_tsb(
2956 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
2957 if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
2958 switch (tsb->flags & 0x07) {
2959 case 1: /* tsa_iostat */
2960 sense = (u64 *)tsb->tsa.iostat.sense;
2961 break;
2962 case 2: /* ts_ddpc */
2963 sense = (u64 *)tsb->tsa.ddpc.sense;
2964 break;
2965 case 3: /* tsa_intrg */
2966 break;
2967 }
2968 }
2969 } else {
2970 if (irb->esw.esw0.erw.cons)
2971 sense = (u64 *)irb->ecw;
2972 }
2973 if (sense) { 2949 if (sense) {
2974 for (sl = 0; sl < 4; sl++) { 2950 DBF_DEV_EVENT(DBF_EMERG, device,
2975 DBF_DEV_EVENT(DBF_EMERG, device, 2951 "%s: %s %02x%02x%02x %016llx %016llx %016llx "
2976 "%s: %016llx %016llx %016llx %016llx", 2952 "%016llx", reason,
2977 reason, sense[0], sense[1], sense[2], 2953 scsw_is_tm(&irb->scsw) ? "t" : "c",
2978 sense[3]); 2954 scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
2979 } 2955 scsw_dstat(&irb->scsw), sense[0], sense[1],
2956 sense[2], sense[3]);
2980 } else { 2957 } else {
2981 DBF_DEV_EVENT(DBF_EMERG, device, "%s", 2958 DBF_DEV_EVENT(DBF_EMERG, device, "%s",
2982 "SORRY - NO VALID SENSE AVAILABLE\n"); 2959 "SORRY - NO VALID SENSE AVAILABLE\n");
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d970ce2814be..cb8f9cef7429 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -172,7 +172,7 @@ dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
172 device = cqr->startdev; 172 device = cqr->startdev;
173 /* dump sense data to s390 debugfeature*/ 173 /* dump sense data to s390 debugfeature*/
174 if (device->discipline && device->discipline->dump_sense_dbf) 174 if (device->discipline && device->discipline->dump_sense_dbf)
175 device->discipline->dump_sense_dbf(device, cqr, irb, "log"); 175 device->discipline->dump_sense_dbf(device, irb, "log");
176} 176}
177EXPORT_SYMBOL(dasd_log_sense_dbf); 177EXPORT_SYMBOL(dasd_log_sense_dbf);
178 178
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index e21ee735f926..31849ad5e59f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -241,7 +241,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
241 /* check for unsolicited interrupts */ 241 /* check for unsolicited interrupts */
242 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 242 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
243 "unsolicited interrupt received"); 243 "unsolicited interrupt received");
244 device->discipline->dump_sense_dbf(device, NULL, irb, "unsolicited"); 244 device->discipline->dump_sense_dbf(device, irb, "unsolicited");
245 dasd_schedule_device_bh(device); 245 dasd_schedule_device_bh(device);
246 return; 246 return;
247}; 247};
@@ -444,17 +444,20 @@ dasd_fba_fill_info(struct dasd_device * device,
444} 444}
445 445
446static void 446static void
447dasd_fba_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req, 447dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
448 struct irb *irb, char *reason) 448 char *reason)
449{ 449{
450 int sl; 450 u64 *sense;
451 if (irb->esw.esw0.erw.cons) { 451
452 for (sl = 0; sl < 4; sl++) { 452 sense = (u64 *) dasd_get_sense(irb);
453 DBF_DEV_EVENT(DBF_EMERG, device, 453 if (sense) {
454 "%s: %08x %08x %08x %08x", 454 DBF_DEV_EVENT(DBF_EMERG, device,
455 reason, irb->ecw[8 * 0], irb->ecw[8 * 1], 455 "%s: %s %02x%02x%02x %016llx %016llx %016llx "
456 irb->ecw[8 * 2], irb->ecw[8 * 3]); 456 "%016llx", reason,
457 } 457 scsw_is_tm(&irb->scsw) ? "t" : "c",
458 scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
459 scsw_dstat(&irb->scsw), sense[0], sense[1],
460 sense[2], sense[3]);
458 } else { 461 } else {
459 DBF_DEV_EVENT(DBF_EMERG, device, "%s", 462 DBF_DEV_EVENT(DBF_EMERG, device, "%s",
460 "SORRY - NO VALID SENSE AVAILABLE\n"); 463 "SORRY - NO VALID SENSE AVAILABLE\n");
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fd63b2f2bda9..b699ca356ac5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -284,8 +284,7 @@ struct dasd_discipline {
284 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); 284 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
285 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, 285 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
286 struct irb *); 286 struct irb *);
287 void (*dump_sense_dbf) (struct dasd_device *, struct dasd_ccw_req *, 287 void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
288 struct irb *, char *);
289 288
290 void (*handle_unsolicited_interrupt) (struct dasd_device *, 289 void (*handle_unsolicited_interrupt) (struct dasd_device *,
291 struct irb *); 290 struct irb *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 4ce3f72ee1c1..df918ef27965 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -16,6 +16,7 @@
16#include <linux/major.h> 16#include <linux/major.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkpg.h> 18#include <linux/blkpg.h>
19#include <linux/smp_lock.h>
19 20
20#include <asm/ccwdev.h> 21#include <asm/ccwdev.h>
21#include <asm/cmb.h> 22#include <asm/cmb.h>
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 016f9e9d2591..d34617682a62 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -964,7 +964,8 @@ static int dcssblk_freeze(struct device *dev)
964 break; 964 break;
965 } 965 }
966 if (rc) 966 if (rc)
967 pr_err("Suspend failed because device %s is writeable.\n", 967 pr_err("Suspending the system failed because DCSS device %s "
968 "is writable\n",
968 dev_info->segment_name); 969 dev_info->segment_name);
969 return rc; 970 return rc;
970} 971}
@@ -987,8 +988,8 @@ static int dcssblk_restore(struct device *dev)
987 goto out_panic; 988 goto out_panic;
988 } 989 }
989 if (start != entry->start || end != entry->end) { 990 if (start != entry->start || end != entry->end) {
990 pr_err("Mismatch of start / end address after " 991 pr_err("The address range of DCSS %s changed "
991 "resuming device %s\n", 992 "while the system was suspended\n",
992 entry->segment_name); 993 entry->segment_name);
993 goto out_panic; 994 goto out_panic;
994 } 995 }
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 2e9e1ecd6d82..db442cd6621e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -443,7 +443,7 @@ fail:
443 */ 443 */
444static void xpram_resume_error(const char *message) 444static void xpram_resume_error(const char *message)
445{ 445{
446 pr_err("Resume error: %s\n", message); 446 pr_err("Resuming the system failed: %s\n", message);
447 panic("xpram resume error\n"); 447 panic("xpram resume error\n");
448} 448}
449 449
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 7892550d7932..3234e90bd7f9 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp)
320 goto out_path; 320 goto out_path;
321 } 321 }
322 filp->private_data = monpriv; 322 filp->private_data = monpriv;
323 dev_set_drvdata(&monreader_device, monpriv); 323 dev_set_drvdata(monreader_device, monpriv);
324 unlock_kernel(); 324 unlock_kernel();
325 return nonseekable_open(inode, filp); 325 return nonseekable_open(inode, filp);
326 326
@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = {
463 *****************************************************************************/ 463 *****************************************************************************/
464static int monreader_freeze(struct device *dev) 464static int monreader_freeze(struct device *dev)
465{ 465{
466 struct mon_private *monpriv = dev_get_drvdata(&dev); 466 struct mon_private *monpriv = dev_get_drvdata(dev);
467 int rc; 467 int rc;
468 468
469 if (!monpriv) 469 if (!monpriv)
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index 85f491ea929c..7a7bfc947d97 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -92,5 +92,10 @@ void sclp_set_columns(struct sclp_buffer *, unsigned short);
92void sclp_set_htab(struct sclp_buffer *, unsigned short); 92void sclp_set_htab(struct sclp_buffer *, unsigned short);
93int sclp_chars_in_buffer(struct sclp_buffer *); 93int sclp_chars_in_buffer(struct sclp_buffer *);
94 94
95#ifdef CONFIG_SCLP_CONSOLE
95void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event); 96void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
97#else
98static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
99#endif
100
96#endif /* __SCLP_RW_H__ */ 101#endif /* __SCLP_RW_H__ */
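
The sclp_rw.h hunk above keeps callers of sclp_console_pm_event() building when the console is configured out by supplying an empty static inline stub. A minimal sketch of that conditional-stub header pattern; the HAVE_FEATURE macro and feature_event() are illustrative names.

#include <stdio.h>

/* Header pattern: real declaration when the feature is compiled in,
 * an empty static inline stub when it is not, so callers need no #ifdefs. */
#define HAVE_FEATURE 0

#if HAVE_FEATURE
void feature_event(int event);
#else
static inline void feature_event(int event) { (void)event; }
#endif

int main(void)
{
	feature_event(1); /* compiles and is a no-op when the feature is off */
	puts("done");
	return 0;
}
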
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index cb7854c10c04..f2bc287b69e4 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -250,14 +250,14 @@ static int vmwdt_resume(void)
250static int vmwdt_suspend(void) 250static int vmwdt_suspend(void)
251{ 251{
252 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { 252 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
253 pr_err("The watchdog is in use. " 253 pr_err("The system cannot be suspended while the watchdog"
254 "This prevents hibernation or suspend.\n"); 254 " is in use\n");
255 return NOTIFY_BAD; 255 return NOTIFY_BAD;
256 } 256 }
257 if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { 257 if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
258 clear_bit(VMWDT_OPEN, &vmwdt_is_open); 258 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
259 pr_err("The watchdog is running. " 259 pr_err("The system cannot be suspended while the watchdog"
260 "This prevents hibernation or suspend.\n"); 260 " is running\n");
261 return NOTIFY_BAD; 261 return NOTIFY_BAD;
262 } 262 }
263 return NOTIFY_DONE; 263 return NOTIFY_DONE;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 727a809636d8..ed3dcdea7fe1 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1145,12 +1145,17 @@ ap_config_timeout(unsigned long ptr)
1145 */ 1145 */
1146static inline void ap_schedule_poll_timer(void) 1146static inline void ap_schedule_poll_timer(void)
1147{ 1147{
1148 ktime_t hr_time;
1148 if (ap_using_interrupts() || ap_suspend_flag) 1149 if (ap_using_interrupts() || ap_suspend_flag)
1149 return; 1150 return;
1150 if (hrtimer_is_queued(&ap_poll_timer)) 1151 if (hrtimer_is_queued(&ap_poll_timer))
1151 return; 1152 return;
1152 hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), 1153 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1153 HRTIMER_MODE_ABS); 1154 hr_time = ktime_set(0, poll_timeout);
1155 hrtimer_forward_now(&ap_poll_timer, hr_time);
1156 hrtimer_restart(&ap_poll_timer);
1157 }
1158 return;
1154} 1159}
1155 1160
1156/** 1161/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8030e25152fb..c75d6f35cb5f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -553,40 +553,35 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
553 _zfcp_erp_unit_reopen(unit, clear, id, ref); 553 _zfcp_erp_unit_reopen(unit, clear, id, ref);
554} 554}
555 555
556static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act) 556static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
557{ 557{
558 struct zfcp_adapter *adapter = act->adapter;
559 struct zfcp_port *port = act->port;
560 struct zfcp_unit *unit = act->unit;
561 u32 status = act->status;
562
563 /* initiate follow-up actions depending on success of finished action */
564 switch (act->action) { 558 switch (act->action) {
565
566 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 559 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
567 if (status == ZFCP_ERP_SUCCEEDED) 560 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL);
568 _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL);
569 else
570 _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL);
571 break; 561 break;
572
573 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 562 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
574 if (status == ZFCP_ERP_SUCCEEDED) 563 _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL);
575 _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL);
576 else
577 _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL);
578 break; 564 break;
579
580 case ZFCP_ERP_ACTION_REOPEN_PORT: 565 case ZFCP_ERP_ACTION_REOPEN_PORT:
581 if (status == ZFCP_ERP_SUCCEEDED) 566 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
582 _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL);
583 else
584 _zfcp_erp_port_forced_reopen(port, 0, "ersfa_6", NULL);
585 break; 567 break;
586
587 case ZFCP_ERP_ACTION_REOPEN_UNIT: 568 case ZFCP_ERP_ACTION_REOPEN_UNIT:
588 if (status != ZFCP_ERP_SUCCEEDED) 569 _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL);
589 _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL); 570 break;
571 }
572}
573
574static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
575{
576 switch (act->action) {
577 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
578 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL);
579 break;
580 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
581 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
582 break;
583 case ZFCP_ERP_ACTION_REOPEN_PORT:
584 _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL);
590 break; 585 break;
591 } 586 }
592} 587}
@@ -801,7 +796,7 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
801 return ZFCP_ERP_FAILED; 796 return ZFCP_ERP_FAILED;
802 797
803 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 798 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
804 if (status & ZFCP_STATUS_PORT_PHYS_OPEN) 799 if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
805 return ZFCP_ERP_SUCCEEDED; 800 return ZFCP_ERP_SUCCEEDED;
806 } 801 }
807 return ZFCP_ERP_FAILED; 802 return ZFCP_ERP_FAILED;
@@ -853,11 +848,17 @@ void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
853 gid_pn_work); 848 gid_pn_work);
854 849
855 retval = zfcp_fc_ns_gid_pn(&port->erp_action); 850 retval = zfcp_fc_ns_gid_pn(&port->erp_action);
856 if (retval == -ENOMEM) 851 if (!retval) {
857 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM); 852 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
858 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; 853 goto out;
859 if (retval) 854 }
860 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED); 855 if (retval == -ENOMEM) {
856 zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM);
857 goto out;
858 }
 859	/* all other error conditions */
860 zfcp_erp_notify(&port->erp_action, 0);
861out:
861 zfcp_port_put(port); 862 zfcp_port_put(port);
862} 863}
863 864
@@ -1289,7 +1290,10 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1289 retval = zfcp_erp_strategy_statechange(erp_action, retval); 1290 retval = zfcp_erp_strategy_statechange(erp_action, retval);
1290 if (retval == ZFCP_ERP_EXIT) 1291 if (retval == ZFCP_ERP_EXIT)
1291 goto unlock; 1292 goto unlock;
1292 zfcp_erp_strategy_followup_actions(erp_action); 1293 if (retval == ZFCP_ERP_SUCCEEDED)
1294 zfcp_erp_strategy_followup_success(erp_action);
1295 if (retval == ZFCP_ERP_FAILED)
1296 zfcp_erp_strategy_followup_failed(erp_action);
1293 1297
1294 unlock: 1298 unlock:
1295 write_unlock(&adapter->erp_lock); 1299 write_unlock(&adapter->erp_lock);
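
The zfcp_erp.c refactor above splits the old combined follow-up routine into one helper for failed actions and one for successful ones, and the strategy loop dispatches on the result. A compact sketch of that dispatch-on-outcome shape; the action names are placeholders.

#include <stdio.h>

enum outcome { FAILED, SUCCEEDED };

static void followup_failed(void)    { puts("retry the same action"); }
static void followup_succeeded(void) { puts("kick off dependent actions"); }

static void strategy(enum outcome result)
{
	/* one helper per outcome instead of one routine full of if/else */
	if (result == SUCCEEDED)
		followup_succeeded();
	if (result == FAILED)
		followup_failed();
}

int main(void)
{
	strategy(SUCCEEDED);
	strategy(FAILED);
	return 0;
}
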
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 2f0705d76b72..47daebfa7e59 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -79,11 +79,9 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
79 79
80 mutex_unlock(&wka_port->mutex); 80 mutex_unlock(&wka_port->mutex);
81 81
82 wait_event_timeout( 82 wait_event(wka_port->completion_wq,
83 wka_port->completion_wq, 83 wka_port->status == ZFCP_WKA_PORT_ONLINE ||
84 wka_port->status == ZFCP_WKA_PORT_ONLINE || 84 wka_port->status == ZFCP_WKA_PORT_OFFLINE);
85 wka_port->status == ZFCP_WKA_PORT_OFFLINE,
86 HZ >> 1);
87 85
88 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { 86 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
89 atomic_inc(&wka_port->refcount); 87 atomic_inc(&wka_port->refcount);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c57658f3d34f..47795fbf081f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -670,8 +670,11 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
670 zfcp_fsf_sbal_check(adapter), 5 * HZ); 670 zfcp_fsf_sbal_check(adapter), 5 * HZ);
671 if (ret > 0) 671 if (ret > 0)
672 return 0; 672 return 0;
673 if (!ret) 673 if (!ret) {
674 atomic_inc(&adapter->qdio_outb_full); 674 atomic_inc(&adapter->qdio_outb_full);
675 /* assume hanging outbound queue, try queue recovery */
676 zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
677 }
675 678
676 spin_lock_bh(&adapter->req_q_lock); 679 spin_lock_bh(&adapter->req_q_lock);
677 return -EIO; 680 return -EIO;
@@ -722,7 +725,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
722 req = zfcp_fsf_alloc_qtcb(pool); 725 req = zfcp_fsf_alloc_qtcb(pool);
723 726
724 if (unlikely(!req)) 727 if (unlikely(!req))
725 return ERR_PTR(-EIO); 728 return ERR_PTR(-ENOMEM);
726 729
727 if (adapter->req_no == 0) 730 if (adapter->req_no == 0)
728 adapter->req_no++; 731 adapter->req_no++;
@@ -1010,6 +1013,23 @@ skip_fsfstatus:
1010 send_ct->handler(send_ct->handler_data); 1013 send_ct->handler(send_ct->handler_data);
1011} 1014}
1012 1015
1016static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
1017 struct scatterlist *sg_req,
1018 struct scatterlist *sg_resp)
1019{
1020 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1021 sbale[2].addr = sg_virt(sg_req);
1022 sbale[2].length = sg_req->length;
1023 sbale[3].addr = sg_virt(sg_resp);
1024 sbale[3].length = sg_resp->length;
1025 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1026}
1027
1028static int zfcp_fsf_one_sbal(struct scatterlist *sg)
1029{
1030 return sg_is_last(sg) && sg->length <= PAGE_SIZE;
1031}
1032
1013static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, 1033static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1014 struct scatterlist *sg_req, 1034 struct scatterlist *sg_req,
1015 struct scatterlist *sg_resp, 1035 struct scatterlist *sg_resp,
@@ -1020,30 +1040,30 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1020 int bytes; 1040 int bytes;
1021 1041
1022 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { 1042 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1023 if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE || 1043 if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
1024 !sg_is_last(sg_req) || !sg_is_last(sg_resp))
1025 return -EOPNOTSUPP; 1044 return -EOPNOTSUPP;
1026 1045
1027 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; 1046 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1028 sbale[2].addr = sg_virt(sg_req); 1047 return 0;
1029 sbale[2].length = sg_req->length; 1048 }
1030 sbale[3].addr = sg_virt(sg_resp); 1049
1031 sbale[3].length = sg_resp->length; 1050 /* use single, unchained SBAL if it can hold the request */
1032 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1051 if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
1052 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1033 return 0; 1053 return 0;
1034 } 1054 }
1035 1055
1036 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, 1056 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1037 sg_req, max_sbals); 1057 sg_req, max_sbals);
1038 if (bytes <= 0) 1058 if (bytes <= 0)
1039 return -ENOMEM; 1059 return -EIO;
1040 req->qtcb->bottom.support.req_buf_length = bytes; 1060 req->qtcb->bottom.support.req_buf_length = bytes;
1041 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; 1061 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1042 1062
1043 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, 1063 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1044 sg_resp, max_sbals); 1064 sg_resp, max_sbals);
1045 if (bytes <= 0) 1065 if (bytes <= 0)
1046 return -ENOMEM; 1066 return -EIO;
1047 req->qtcb->bottom.support.resp_buf_length = bytes; 1067 req->qtcb->bottom.support.resp_buf_length = bytes;
1048 1068
1049 return 0; 1069 return 0;
@@ -1607,10 +1627,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1607 case FSF_ACCESS_DENIED: 1627 case FSF_ACCESS_DENIED:
1608 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1628 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1609 break; 1629 break;
1610 case FSF_PORT_ALREADY_OPEN:
1611 break;
1612 case FSF_GOOD: 1630 case FSF_GOOD:
1613 wka_port->handle = header->port_handle; 1631 wka_port->handle = header->port_handle;
1632 /* fall through */
1633 case FSF_PORT_ALREADY_OPEN:
1614 wka_port->status = ZFCP_WKA_PORT_ONLINE; 1634 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1615 } 1635 }
1616out: 1636out:
@@ -1731,15 +1751,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1731 zfcp_fsf_access_denied_port(req, port); 1751 zfcp_fsf_access_denied_port(req, port);
1732 break; 1752 break;
1733 case FSF_PORT_BOXED: 1753 case FSF_PORT_BOXED:
1734 zfcp_erp_port_boxed(port, "fscpph2", req);
1735 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1736 ZFCP_STATUS_FSFREQ_RETRY;
1737 /* can't use generic zfcp_erp_modify_port_status because 1754 /* can't use generic zfcp_erp_modify_port_status because
1738 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1755 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1739 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1756 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1740 list_for_each_entry(unit, &port->unit_list_head, list) 1757 list_for_each_entry(unit, &port->unit_list_head, list)
1741 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1758 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1742 &unit->status); 1759 &unit->status);
1760 zfcp_erp_port_boxed(port, "fscpph2", req);
1761 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1762 ZFCP_STATUS_FSFREQ_RETRY;
1763
1743 break; 1764 break;
1744 case FSF_ADAPTER_STATUS_AVAILABLE: 1765 case FSF_ADAPTER_STATUS_AVAILABLE:
1745 switch (header->fsf_status_qual.word[0]) { 1766 switch (header->fsf_status_qual.word[0]) {
@@ -2541,7 +2562,6 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2541 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, 2562 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
2542 FSF_MAX_SBALS_PER_REQ); 2563 FSF_MAX_SBALS_PER_REQ);
2543 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2564 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2544 retval = -ENOMEM;
2545 zfcp_fsf_req_free(req); 2565 zfcp_fsf_req_free(req);
2546 goto out; 2566 goto out;
2547 } 2567 }
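
In the zfcp_fsf_open_wka_port_handler() hunk above, FSF_GOOD now deliberately falls through into FSF_PORT_ALREADY_OPEN so both outcomes mark the port online, with a comment documenting the intentional missing break. A tiny sketch of that annotated fall-through; the enum and names are illustrative.

#include <stdio.h>

enum result { GOOD, ALREADY_OPEN, DENIED };

static const char *status;
static int handle;

static void handle_result(enum result r, int new_handle)
{
	switch (r) {
	case DENIED:
		status = "offline";
		break;
	case GOOD:
		handle = new_handle;  /* only a fresh open yields a handle */
		/* fall through */
	case ALREADY_OPEN:
		status = "online";    /* both cases end up online */
		break;
	}
}

int main(void)
{
	handle_result(GOOD, 42);
	printf("%s, handle %d\n", status, handle);
	handle_result(ALREADY_OPEN, 0);
	printf("%s, handle %d\n", status, handle);
	return 0;
}
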
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 967ede73f4c5..6925a1784682 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -167,20 +167,21 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
167 struct zfcp_unit *unit = scpnt->device->hostdata; 167 struct zfcp_unit *unit = scpnt->device->hostdata;
168 struct zfcp_fsf_req *old_req, *abrt_req; 168 struct zfcp_fsf_req *old_req, *abrt_req;
169 unsigned long flags; 169 unsigned long flags;
170 unsigned long old_req_id = (unsigned long) scpnt->host_scribble; 170 unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
171 int retval = SUCCESS; 171 int retval = SUCCESS;
172 int retry = 3; 172 int retry = 3;
173 char *dbf_tag;
173 174
174 /* avoid race condition between late normal completion and abort */ 175 /* avoid race condition between late normal completion and abort */
175 write_lock_irqsave(&adapter->abort_lock, flags); 176 write_lock_irqsave(&adapter->abort_lock, flags);
176 177
177 spin_lock(&adapter->req_list_lock); 178 spin_lock(&adapter->req_list_lock);
178 old_req = zfcp_reqlist_find(adapter, old_req_id); 179 old_req = zfcp_reqlist_find(adapter, old_reqid);
179 spin_unlock(&adapter->req_list_lock); 180 spin_unlock(&adapter->req_list_lock);
180 if (!old_req) { 181 if (!old_req) {
181 write_unlock_irqrestore(&adapter->abort_lock, flags); 182 write_unlock_irqrestore(&adapter->abort_lock, flags);
182 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 183 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
183 old_req_id); 184 old_reqid);
184 return FAILED; /* completion could be in progress */ 185 return FAILED; /* completion could be in progress */
185 } 186 }
186 old_req->data = NULL; 187 old_req->data = NULL;
@@ -189,7 +190,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
189 write_unlock_irqrestore(&adapter->abort_lock, flags); 190 write_unlock_irqrestore(&adapter->abort_lock, flags);
190 191
191 while (retry--) { 192 while (retry--) {
192 abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit); 193 abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit);
193 if (abrt_req) 194 if (abrt_req)
194 break; 195 break;
195 196
@@ -197,7 +198,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
197 if (!(atomic_read(&adapter->status) & 198 if (!(atomic_read(&adapter->status) &
198 ZFCP_STATUS_COMMON_RUNNING)) { 199 ZFCP_STATUS_COMMON_RUNNING)) {
199 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 200 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
200 old_req_id); 201 old_reqid);
201 return SUCCESS; 202 return SUCCESS;
202 } 203 }
203 } 204 }
@@ -208,13 +209,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
208 abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 209 abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
209 210
210 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) 211 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
211 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0); 212 dbf_tag = "okay";
212 else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) 213 else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
213 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0); 214 dbf_tag = "lte2";
214 else { 215 else {
215 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0); 216 dbf_tag = "fail";
216 retval = FAILED; 217 retval = FAILED;
217 } 218 }
219 zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid);
218 zfcp_fsf_req_free(abrt_req); 220 zfcp_fsf_req_free(abrt_req);
219 return retval; 221 return retval;
220} 222}
@@ -534,6 +536,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
534 struct fc_rport_identifiers ids; 536 struct fc_rport_identifiers ids;
535 struct fc_rport *rport; 537 struct fc_rport *rport;
536 538
539 if (port->rport)
540 return;
541
537 ids.node_name = port->wwnn; 542 ids.node_name = port->wwnn;
538 ids.port_name = port->wwpn; 543 ids.port_name = port->wwpn;
539 ids.port_id = port->d_id; 544 ids.port_id = port->d_id;
@@ -557,8 +562,10 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
557{ 562{
558 struct fc_rport *rport = port->rport; 563 struct fc_rport *rport = port->rport;
559 564
560 if (rport) 565 if (rport) {
561 fc_remote_port_delete(rport); 566 fc_remote_port_delete(rport);
567 port->rport = NULL;
568 }
562} 569}
563 570
564void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) 571void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3e51e64d1108..0fe5cce818cb 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -494,9 +494,14 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
494 struct Scsi_Host *scsi_host = class_to_shost(dev); 494 struct Scsi_Host *scsi_host = class_to_shost(dev);
495 struct zfcp_adapter *adapter = 495 struct zfcp_adapter *adapter =
496 (struct zfcp_adapter *) scsi_host->hostdata[0]; 496 (struct zfcp_adapter *) scsi_host->hostdata[0];
497 u64 util;
498
499 spin_lock_bh(&adapter->qdio_stat_lock);
500 util = adapter->req_q_util;
501 spin_unlock_bh(&adapter->qdio_stat_lock);
497 502
498 return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), 503 return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
499 (unsigned long long)adapter->req_q_util); 504 (unsigned long long)util);
500} 505}
501static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); 506static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
502 507
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 0471f8800483..4240b05aef6d 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -2826,8 +2826,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2826 */ 2826 */
2827 2827
2828 local_irq_restore(flags); 2828 local_irq_restore(flags);
2829 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n" 2829 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
2830 KERN_INFO " before abortion\n", HOSTNO);
2831 2830
2832 /* Maybe it is sufficient just to release the ST-DMA lock... (if 2831 /* Maybe it is sufficient just to release the ST-DMA lock... (if
2833 * possible at all) At least, we should check if the lock could be 2832 * possible at all) At least, we should check if the lock could be
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild
index 25a2032bfa26..70d060b7ff4f 100644
--- a/drivers/scsi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgb3i/Kbuild
@@ -1,4 +1,4 @@
1EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3 1EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3
2 2
3cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o 3cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o
4obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o 4obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 74369a3f963b..c399f485aa7d 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/if_vlan.h>
16#include <net/dst.h> 17#include <net/dst.h>
17#include <net/tcp.h> 18#include <net/tcp.h>
18#include <scsi/scsi_cmnd.h> 19#include <scsi/scsi_cmnd.h>
@@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
184 struct cxgb3i_adapter *snic; 185 struct cxgb3i_adapter *snic;
185 int i; 186 int i;
186 187
188 if (ndev->priv_flags & IFF_802_1Q_VLAN)
189 ndev = vlan_dev_real_dev(ndev);
190
187 read_lock(&cxgb3i_snic_rwlock); 191 read_lock(&cxgb3i_snic_rwlock);
188 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { 192 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
189 for (i = 0; i < snic->hba_cnt; i++) { 193 for (i = 0; i < snic->hba_cnt; i++) {
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index a84072865fc2..2c266c01dc5a 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
473 * limitation for the device. Try 40-bit first, and 473 * limitation for the device. Try 40-bit first, and
474 * fail to 32-bit. 474 * fail to 32-bit.
475 */ 475 */
476 err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); 476 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
477 if (err) { 477 if (err) {
478 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 478 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
479 if (err) { 479 if (err) {
480 shost_printk(KERN_ERR, fnic->lport->host, 480 shost_printk(KERN_ERR, fnic->lport->host,
481 "No usable DMA configuration " 481 "No usable DMA configuration "
482 "aborting\n"); 482 "aborting\n");
483 goto err_out_release_regions; 483 goto err_out_release_regions;
484 } 484 }
485 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 485 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
486 if (err) { 486 if (err) {
487 shost_printk(KERN_ERR, fnic->lport->host, 487 shost_printk(KERN_ERR, fnic->lport->host,
488 "Unable to obtain 32-bit DMA " 488 "Unable to obtain 32-bit DMA "
@@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
490 goto err_out_release_regions; 490 goto err_out_release_regions;
491 } 491 }
492 } else { 492 } else {
493 err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); 493 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
494 if (err) { 494 if (err) {
495 shost_printk(KERN_ERR, fnic->lport->host, 495 shost_printk(KERN_ERR, fnic->lport->host,
496 "Unable to obtain 40-bit DMA " 496 "Unable to obtain 40-bit DMA "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index eabf36502856..bfc996971b81 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
245 struct vnic_wq_copy *wq, 245 struct vnic_wq_copy *wq,
246 struct fnic_io_req *io_req, 246 struct fnic_io_req *io_req,
247 struct scsi_cmnd *sc, 247 struct scsi_cmnd *sc,
248 u32 sg_count) 248 int sg_count)
249{ 249{
250 struct scatterlist *sg; 250 struct scatterlist *sg;
251 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); 251 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
@@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
260 char msg[2]; 260 char msg[2];
261 261
262 if (sg_count) { 262 if (sg_count) {
263 BUG_ON(sg_count < 0);
264 BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
265
266 /* For each SGE, create a device desc entry */ 263 /* For each SGE, create a device desc entry */
267 desc = io_req->sgl_list; 264 desc = io_req->sgl_list;
268 for_each_sg(scsi_sglist(sc), sg, sg_count, i) { 265 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
@@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
344 struct fnic *fnic; 341 struct fnic *fnic;
345 struct vnic_wq_copy *wq; 342 struct vnic_wq_copy *wq;
346 int ret; 343 int ret;
347 u32 sg_count; 344 int sg_count;
348 unsigned long flags; 345 unsigned long flags;
349 unsigned long ptr; 346 unsigned long ptr;
350 347
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 869a11bdccbd..9928704e235f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1095 MAX_INDIRECT_BUFS); 1095 MAX_INDIRECT_BUFS);
1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; 1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1097 } 1097 }
1098
1099 if (hostdata->madapter_info.os_type == 3) {
1100 enable_fast_fail(hostdata);
1101 return;
1102 }
1098 } 1103 }
1099 1104
1100 enable_fast_fail(hostdata); 1105 send_srp_login(hostdata);
1101} 1106}
1102 1107
1103/** 1108/**
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 2bc22be5f849..145ab9ba55ea 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -415,9 +415,9 @@ static void fc_exch_timeout(struct work_struct *work)
415 e_stat = ep->esb_stat; 415 e_stat = ep->esb_stat;
416 if (e_stat & ESB_ST_COMPLETE) { 416 if (e_stat & ESB_ST_COMPLETE) {
417 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL; 417 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
418 spin_unlock_bh(&ep->ex_lock);
418 if (e_stat & ESB_ST_REC_QUAL) 419 if (e_stat & ESB_ST_REC_QUAL)
419 fc_exch_rrq(ep); 420 fc_exch_rrq(ep);
420 spin_unlock_bh(&ep->ex_lock);
421 goto done; 421 goto done;
422 } else { 422 } else {
423 resp = ep->resp; 423 resp = ep->resp;
@@ -1624,14 +1624,14 @@ static void fc_exch_rrq(struct fc_exch *ep)
1624 struct fc_lport *lp; 1624 struct fc_lport *lp;
1625 struct fc_els_rrq *rrq; 1625 struct fc_els_rrq *rrq;
1626 struct fc_frame *fp; 1626 struct fc_frame *fp;
1627 struct fc_seq *rrq_sp;
1628 u32 did; 1627 u32 did;
1629 1628
1630 lp = ep->lp; 1629 lp = ep->lp;
1631 1630
1632 fp = fc_frame_alloc(lp, sizeof(*rrq)); 1631 fp = fc_frame_alloc(lp, sizeof(*rrq));
1633 if (!fp) 1632 if (!fp)
1634 return; 1633 goto retry;
1634
1635 rrq = fc_frame_payload_get(fp, sizeof(*rrq)); 1635 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1636 memset(rrq, 0, sizeof(*rrq)); 1636 memset(rrq, 0, sizeof(*rrq));
1637 rrq->rrq_cmd = ELS_RRQ; 1637 rrq->rrq_cmd = ELS_RRQ;
@@ -1647,13 +1647,20 @@ static void fc_exch_rrq(struct fc_exch *ep)
1647 fc_host_port_id(lp->host), FC_TYPE_ELS, 1647 fc_host_port_id(lp->host), FC_TYPE_ELS,
1648 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1648 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1649 1649
1650 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, 1650 if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov))
1651 lp->e_d_tov); 1651 return;
1652 if (!rrq_sp) { 1652
1653 ep->esb_stat |= ESB_ST_REC_QUAL; 1653retry:
1654 fc_exch_timer_set_locked(ep, ep->r_a_tov); 1654 spin_lock_bh(&ep->ex_lock);
1655 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
1656 spin_unlock_bh(&ep->ex_lock);
1657 /* drop hold for rec qual */
1658 fc_exch_release(ep);
1655 return; 1659 return;
1656 } 1660 }
1661 ep->esb_stat |= ESB_ST_REC_QUAL;
1662 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1663 spin_unlock_bh(&ep->ex_lock);
1657} 1664}
1658 1665
1659 1666
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 716cc344c5df..a751f6230c22 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1974,10 +1974,10 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1974 * good and have never sent us a successful tmf response 1974 * good and have never sent us a successful tmf response
1975 * then sent more data for the cmd. 1975 * then sent more data for the cmd.
1976 */ 1976 */
1977 spin_lock(&session->lock); 1977 spin_lock_bh(&session->lock);
1978 fail_scsi_task(task, DID_ABORT); 1978 fail_scsi_task(task, DID_ABORT);
1979 conn->tmf_state = TMF_INITIAL; 1979 conn->tmf_state = TMF_INITIAL;
1980 spin_unlock(&session->lock); 1980 spin_unlock_bh(&session->lock);
1981 iscsi_start_tx(conn); 1981 iscsi_start_tx(conn);
1982 goto success_unlocked; 1982 goto success_unlocked;
1983 case TMF_TIMEDOUT: 1983 case TMF_TIMEDOUT:
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 54fa1e42dc4d..b3381959acce 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -766,6 +766,7 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
766 if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, 766 if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr,
767 SAS_ADDR_SIZE) && ephy->port) { 767 SAS_ADDR_SIZE) && ephy->port) {
768 sas_port_add_phy(ephy->port, phy->phy); 768 sas_port_add_phy(ephy->port, phy->phy);
769 phy->port = ephy->port;
769 phy->phy_state = PHY_DEVICE_DISCOVERED; 770 phy->phy_state = PHY_DEVICE_DISCOVERED;
770 return 0; 771 return 0;
771 } 772 }
@@ -945,11 +946,21 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
945 if (ex->ex_phy[i].phy_state == PHY_VACANT || 946 if (ex->ex_phy[i].phy_state == PHY_VACANT ||
946 ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) 947 ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
947 continue; 948 continue;
948 949 /*
950 * Due to races, the phy might not get added to the
951 * wide port, so we add the phy to the wide port here.
952 */
949 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == 953 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
950 SAS_ADDR(child->sas_addr)) 954 SAS_ADDR(child->sas_addr)) {
951 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; 955 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
956 res = sas_ex_join_wide_port(dev, i);
957 if (!res)
958 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
959 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
960
961 }
952 } 962 }
963 res = 0;
953 } 964 }
954 965
955 return res; 966 return res;
@@ -1598,7 +1609,7 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
1598} 1609}
1599 1610
1600static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, 1611static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1601 int from_phy) 1612 int from_phy, bool update)
1602{ 1613{
1603 struct expander_device *ex = &dev->ex_dev; 1614 struct expander_device *ex = &dev->ex_dev;
1604 int res = 0; 1615 int res = 0;
@@ -1611,7 +1622,9 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1611 if (res) 1622 if (res)
1612 goto out; 1623 goto out;
1613 else if (phy_change_count != ex->ex_phy[i].phy_change_count) { 1624 else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1614 ex->ex_phy[i].phy_change_count = phy_change_count; 1625 if (update)
1626 ex->ex_phy[i].phy_change_count =
1627 phy_change_count;
1615 *phy_id = i; 1628 *phy_id = i;
1616 return 0; 1629 return 0;
1617 } 1630 }
@@ -1653,31 +1666,52 @@ out:
1653 kfree(rg_req); 1666 kfree(rg_req);
1654 return res; 1667 return res;
1655} 1668}
1669/**
1670 * sas_find_bcast_dev - find the device that originated BROADCAST(CHANGE).
1671 * @dev: domain device to be checked.
1672 * @src_dev: the device which originated BROADCAST(CHANGE).
1673 *
1674 * Adds support for self-configuring expanders. Suppose two expanders are
1675 * cascaded: while the first-level expander is self-configuring, hot-plugging
1676 * disks in the second-level expander causes BROADCAST(CHANGE) to be
1677 * originated not only in the second-level expander but also in the
1678 * first-level expander (see SAS protocol SAS-2r14, 7.11 for details). That
1679 * is, the expander change count in both expanders will increment at least
1680 * once, but only the phy whose phy change count has changed belongs to the
1681 * source device we are looking for.
1682 */
1656 1683
1657static int sas_find_bcast_dev(struct domain_device *dev, 1684static int sas_find_bcast_dev(struct domain_device *dev,
1658 struct domain_device **src_dev) 1685 struct domain_device **src_dev)
1659{ 1686{
1660 struct expander_device *ex = &dev->ex_dev; 1687 struct expander_device *ex = &dev->ex_dev;
1661 int ex_change_count = -1; 1688 int ex_change_count = -1;
1689 int phy_id = -1;
1662 int res; 1690 int res;
1691 struct domain_device *ch;
1663 1692
1664 res = sas_get_ex_change_count(dev, &ex_change_count); 1693 res = sas_get_ex_change_count(dev, &ex_change_count);
1665 if (res) 1694 if (res)
1666 goto out; 1695 goto out;
1667 if (ex_change_count != -1 && 1696 if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) {
1668 ex_change_count != ex->ex_change_count) { 1697 /* Just detect if this expander phys phy change count changed,
1669 *src_dev = dev; 1698 * in order to determine if this expander originate BROADCAST,
1670 ex->ex_change_count = ex_change_count; 1699 * and do not update phy change count field in our structure.
1671 } else { 1700 */
1672 struct domain_device *ch; 1701 res = sas_find_bcast_phy(dev, &phy_id, 0, false);
1673 1702 if (phy_id != -1) {
1674 list_for_each_entry(ch, &ex->children, siblings) { 1703 *src_dev = dev;
1675 if (ch->dev_type == EDGE_DEV || 1704 ex->ex_change_count = ex_change_count;
1676 ch->dev_type == FANOUT_DEV) { 1705 SAS_DPRINTK("Expander phy change count has changed\n");
1677 res = sas_find_bcast_dev(ch, src_dev); 1706 return res;
1678 if (src_dev) 1707 } else
1679 return res; 1708 SAS_DPRINTK("Expander phys DID NOT change\n");
1680 } 1709 }
1710 list_for_each_entry(ch, &ex->children, siblings) {
1711 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
1712 res = sas_find_bcast_dev(ch, src_dev);
1713 if (src_dev)
1714 return res;
1681 } 1715 }
1682 } 1716 }
1683out: 1717out:
@@ -1700,24 +1734,26 @@ static void sas_unregister_ex_tree(struct domain_device *dev)
1700} 1734}
1701 1735
1702static void sas_unregister_devs_sas_addr(struct domain_device *parent, 1736static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1703 int phy_id) 1737 int phy_id, bool last)
1704{ 1738{
1705 struct expander_device *ex_dev = &parent->ex_dev; 1739 struct expander_device *ex_dev = &parent->ex_dev;
1706 struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; 1740 struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
1707 struct domain_device *child, *n; 1741 struct domain_device *child, *n;
1708 1742 if (last) {
1709 list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { 1743 list_for_each_entry_safe(child, n,
1710 if (SAS_ADDR(child->sas_addr) == 1744 &ex_dev->children, siblings) {
1711 SAS_ADDR(phy->attached_sas_addr)) { 1745 if (SAS_ADDR(child->sas_addr) ==
1712 if (child->dev_type == EDGE_DEV || 1746 SAS_ADDR(phy->attached_sas_addr)) {
1713 child->dev_type == FANOUT_DEV) 1747 if (child->dev_type == EDGE_DEV ||
1714 sas_unregister_ex_tree(child); 1748 child->dev_type == FANOUT_DEV)
1715 else 1749 sas_unregister_ex_tree(child);
1716 sas_unregister_dev(child); 1750 else
1717 break; 1751 sas_unregister_dev(child);
1752 break;
1753 }
1718 } 1754 }
1755 sas_disable_routing(parent, phy->attached_sas_addr);
1719 } 1756 }
1720 sas_disable_routing(parent, phy->attached_sas_addr);
1721 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1757 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1722 sas_port_delete_phy(phy->port, phy->phy); 1758 sas_port_delete_phy(phy->port, phy->phy);
1723 if (phy->port->num_phys == 0) 1759 if (phy->port->num_phys == 0)
@@ -1770,15 +1806,31 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1770{ 1806{
1771 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; 1807 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
1772 struct domain_device *child; 1808 struct domain_device *child;
1773 int res; 1809 bool found = false;
1810 int res, i;
1774 1811
1775 SAS_DPRINTK("ex %016llx phy%d new device attached\n", 1812 SAS_DPRINTK("ex %016llx phy%d new device attached\n",
1776 SAS_ADDR(dev->sas_addr), phy_id); 1813 SAS_ADDR(dev->sas_addr), phy_id);
1777 res = sas_ex_phy_discover(dev, phy_id); 1814 res = sas_ex_phy_discover(dev, phy_id);
1778 if (res) 1815 if (res)
1779 goto out; 1816 goto out;
1817 /* to support a phy inserted into an existing wide port */
1818 for (i = 0; i < dev->ex_dev.num_phys; i++) {
1819 struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
1820 if (i == phy_id)
1821 continue;
1822 if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
1823 SAS_ADDR(ex_phy->attached_sas_addr)) {
1824 found = true;
1825 break;
1826 }
1827 }
1828 if (found) {
1829 sas_ex_join_wide_port(dev, phy_id);
1830 return 0;
1831 }
1780 res = sas_ex_discover_devices(dev, phy_id); 1832 res = sas_ex_discover_devices(dev, phy_id);
1781 if (res) 1833 if (!res)
1782 goto out; 1834 goto out;
1783 list_for_each_entry(child, &dev->ex_dev.children, siblings) { 1835 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1784 if (SAS_ADDR(child->sas_addr) == 1836 if (SAS_ADDR(child->sas_addr) ==
@@ -1793,7 +1845,7 @@ out:
1793 return res; 1845 return res;
1794} 1846}
1795 1847
1796static int sas_rediscover_dev(struct domain_device *dev, int phy_id) 1848static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
1797{ 1849{
1798 struct expander_device *ex = &dev->ex_dev; 1850 struct expander_device *ex = &dev->ex_dev;
1799 struct ex_phy *phy = &ex->ex_phy[phy_id]; 1851 struct ex_phy *phy = &ex->ex_phy[phy_id];
@@ -1804,11 +1856,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id)
1804 switch (res) { 1856 switch (res) {
1805 case SMP_RESP_NO_PHY: 1857 case SMP_RESP_NO_PHY:
1806 phy->phy_state = PHY_NOT_PRESENT; 1858 phy->phy_state = PHY_NOT_PRESENT;
1807 sas_unregister_devs_sas_addr(dev, phy_id); 1859 sas_unregister_devs_sas_addr(dev, phy_id, last);
1808 goto out; break; 1860 goto out; break;
1809 case SMP_RESP_PHY_VACANT: 1861 case SMP_RESP_PHY_VACANT:
1810 phy->phy_state = PHY_VACANT; 1862 phy->phy_state = PHY_VACANT;
1811 sas_unregister_devs_sas_addr(dev, phy_id); 1863 sas_unregister_devs_sas_addr(dev, phy_id, last);
1812 goto out; break; 1864 goto out; break;
1813 case SMP_RESP_FUNC_ACC: 1865 case SMP_RESP_FUNC_ACC:
1814 break; 1866 break;
@@ -1816,7 +1868,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id)
1816 1868
1817 if (SAS_ADDR(attached_sas_addr) == 0) { 1869 if (SAS_ADDR(attached_sas_addr) == 0) {
1818 phy->phy_state = PHY_EMPTY; 1870 phy->phy_state = PHY_EMPTY;
1819 sas_unregister_devs_sas_addr(dev, phy_id); 1871 sas_unregister_devs_sas_addr(dev, phy_id, last);
1820 } else if (SAS_ADDR(attached_sas_addr) == 1872 } else if (SAS_ADDR(attached_sas_addr) ==
1821 SAS_ADDR(phy->attached_sas_addr)) { 1873 SAS_ADDR(phy->attached_sas_addr)) {
1822 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", 1874 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
@@ -1828,12 +1880,27 @@ out:
1828 return res; 1880 return res;
1829} 1881}
1830 1882
1883/**
1884 * sas_rediscover - revalidate the domain.
1885 * @dev: domain device to be checked.
1886 * @phy_id: the phy id to be checked.
1887 *
1888 * NOTE: this process _must_ quit (return) as soon as any connection
1889 * errors are encountered. Connection recovery is done elsewhere.
1890 * The discover process only interrogates devices in order to discover the
1891 * domain. For unplugging, we unregister the device only when it is the
1892 * last phy in the port; for the other phys in this port, we just delete
1893 * them from the port. For insertion, we do discovery only when it is the
1894 * first phy; the other phys in this port are simply added to the port to
1895 * form the wide port.
1896 */
1831static int sas_rediscover(struct domain_device *dev, const int phy_id) 1897static int sas_rediscover(struct domain_device *dev, const int phy_id)
1832{ 1898{
1833 struct expander_device *ex = &dev->ex_dev; 1899 struct expander_device *ex = &dev->ex_dev;
1834 struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; 1900 struct ex_phy *changed_phy = &ex->ex_phy[phy_id];
1835 int res = 0; 1901 int res = 0;
1836 int i; 1902 int i;
1903 bool last = true; /* is this the last phy of the port */
1837 1904
1838 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", 1905 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
1839 SAS_ADDR(dev->sas_addr), phy_id); 1906 SAS_ADDR(dev->sas_addr), phy_id);
@@ -1848,13 +1915,13 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
1848 SAS_ADDR(changed_phy->attached_sas_addr)) { 1915 SAS_ADDR(changed_phy->attached_sas_addr)) {
1849 SAS_DPRINTK("phy%d part of wide port with " 1916 SAS_DPRINTK("phy%d part of wide port with "
1850 "phy%d\n", phy_id, i); 1917 "phy%d\n", phy_id, i);
1851 goto out; 1918 last = false;
1919 break;
1852 } 1920 }
1853 } 1921 }
1854 res = sas_rediscover_dev(dev, phy_id); 1922 res = sas_rediscover_dev(dev, phy_id, last);
1855 } else 1923 } else
1856 res = sas_discover_new(dev, phy_id); 1924 res = sas_discover_new(dev, phy_id);
1857out:
1858 return res; 1925 return res;
1859} 1926}
1860 1927
@@ -1881,7 +1948,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
1881 1948
1882 do { 1949 do {
1883 phy_id = -1; 1950 phy_id = -1;
1884 res = sas_find_bcast_phy(dev, &phy_id, i); 1951 res = sas_find_bcast_phy(dev, &phy_id, i, true);
1885 if (phy_id == -1) 1952 if (phy_id == -1)
1886 break; 1953 break;
1887 res = sas_rediscover(dev, phy_id); 1954 res = sas_rediscover(dev, phy_id);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e6ac59c023f1..fe8b74c706d2 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -56,7 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
56 } 56 }
57 } 57 }
58 58
59 /* find a port */ 59 /* see if the phy should be part of a wide port */
60 spin_lock_irqsave(&sas_ha->phy_port_lock, flags); 60 spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
61 for (i = 0; i < sas_ha->num_phys; i++) { 61 for (i = 0; i < sas_ha->num_phys; i++) {
62 port = sas_ha->sas_port[i]; 62 port = sas_ha->sas_port[i];
@@ -69,12 +69,23 @@ static void sas_form_port(struct asd_sas_phy *phy)
69 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, 69 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
70 port->id); 70 port->id);
71 break; 71 break;
72 } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) {
73 memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
74 break;
75 } 72 }
76 spin_unlock(&port->phy_list_lock); 73 spin_unlock(&port->phy_list_lock);
77 } 74 }
75 /* The phy does not match any existing port, create a new one */
76 if (i == sas_ha->num_phys) {
77 for (i = 0; i < sas_ha->num_phys; i++) {
78 port = sas_ha->sas_port[i];
79 spin_lock(&port->phy_list_lock);
80 if (*(u64 *)port->sas_addr == 0
81 && port->num_phys == 0) {
82 memcpy(port->sas_addr, phy->sas_addr,
83 SAS_ADDR_SIZE);
84 break;
85 }
86 spin_unlock(&port->phy_list_lock);
87 }
88 }
78 89
79 if (i >= sas_ha->num_phys) { 90 if (i >= sas_ha->num_phys) {
80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", 91 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index b12ad7c7c673..18735b39b3d3 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -75,8 +75,9 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *
75 int i; 75 int i;
76 printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd); 76 printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd);
77 for (i = 0; i < cmd->cmd_len; ++i) 77 for (i = 0; i < cmd->cmd_len; ++i)
78 printk(" %.2x", cmd->cmnd[i]); 78 printk(KERN_CONT " %.2x", cmd->cmnd[i]);
79 printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", 79 printk(KERN_CONT "\n");
80 printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
80 scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd)); 81 scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
81 } 82 }
82#endif 83#endif
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 650bcef08f2a..cd78c501803a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/moduleparam.h> 10#include <linux/moduleparam.h>
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <linux/smp_lock.h>
13#include <linux/list.h> 12#include <linux/list.h>
14 13
15#include <scsi/scsi_tcq.h> 14#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index fcc184cd066d..cbceb0ebabf7 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -15,19 +15,18 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
15 uint32_t cnt; 15 uint32_t cnt;
16 uint8_t *c = b; 16 uint8_t *c = b;
17 17
18 printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " 18 printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
19 "Fh\n"); 19 "Fh\n");
20 printk("------------------------------------------------------------" 20 printk("------------------------------------------------------------"
21 "--\n"); 21 "--\n");
22 for (cnt = 0; cnt < size; cnt++, c++) { 22 for (cnt = 0; cnt < size; c++) {
23 printk(KERN_DEBUG "%02x", *c); 23 printk(KERN_INFO "%02x", *c);
24 if (!(cnt % 16)) 24 if (!(++cnt % 16))
25 printk(KERN_DEBUG "\n"); 25 printk(KERN_INFO "\n");
26 26
27 else 27 else
28 printk(KERN_DEBUG " "); 28 printk(KERN_INFO " ");
29 } 29 }
30 if (cnt % 16) 30 printk(KERN_INFO "\n");
31 printk(KERN_DEBUG "\n");
32} 31}
33 32
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index b586f27c3bd4..81b5f29254e2 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -100,7 +100,6 @@
100#define MAX_SRBS MAX_CMDS_TO_RISC 100#define MAX_SRBS MAX_CMDS_TO_RISC
101#define MBOX_AEN_REG_COUNT 5 101#define MBOX_AEN_REG_COUNT 5
102#define MAX_INIT_RETRIES 5 102#define MAX_INIT_RETRIES 5
103#define IOCB_HIWAT_CUSHION 16
104 103
105/* 104/*
106 * Buffer sizes 105 * Buffer sizes
@@ -184,6 +183,11 @@ struct srb {
184 uint16_t cc_stat; 183 uint16_t cc_stat;
185 u_long r_start; /* Time we receive a cmd from OS */ 184 u_long r_start; /* Time we receive a cmd from OS */
186 u_long u_start; /* Time when we handed the cmd to F/W */ 185 u_long u_start; /* Time when we handed the cmd to F/W */
186
187 /* Used for extended sense / status continuation */
188 uint8_t *req_sense_ptr;
189 uint16_t req_sense_len;
190 uint16_t reserved2;
187}; 191};
188 192
189/* 193/*
@@ -302,7 +306,6 @@ struct scsi_qla_host {
302 uint32_t tot_ddbs; 306 uint32_t tot_ddbs;
303 307
304 uint16_t iocb_cnt; 308 uint16_t iocb_cnt;
305 uint16_t iocb_hiwat;
306 309
307 /* SRB cache. */ 310 /* SRB cache. */
308#define SRB_MIN_REQ 128 311#define SRB_MIN_REQ 128
@@ -436,6 +439,8 @@ struct scsi_qla_host {
436 /* Map ddb_list entry by FW ddb index */ 439 /* Map ddb_list entry by FW ddb index */
437 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; 440 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
438 441
442 /* Saved srb for status continuation entry processing */
443 struct srb *status_srb;
439}; 444};
440 445
441static inline int is_qla4010(struct scsi_qla_host *ha) 446static inline int is_qla4010(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 1b667a70cffa..9cd7a608df38 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -572,6 +572,7 @@ struct conn_event_log_entry {
572 *************************************************************************/ 572 *************************************************************************/
573#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CBD */ 573#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CBD */
574#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */ 574#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */
575#define IOCB_MAX_EXT_SENSEDATA_LEN 60 /* Bytes of extended sense data */
575 576
576/* IOCB header structure */ 577/* IOCB header structure */
577struct qla4_header { 578struct qla4_header {
@@ -733,6 +734,12 @@ struct status_entry {
733 734
734}; 735};
735 736
737/* Status Continuation entry */
738struct status_cont_entry {
739 struct qla4_header hdr; /* 00-03 */
740 uint8_t ext_sense_data[IOCB_MAX_EXT_SENSEDATA_LEN]; /* 04-63 */
741};
742
736struct passthru0 { 743struct passthru0 {
737 struct qla4_header hdr; /* 00-03 */ 744 struct qla4_header hdr; /* 00-03 */
738 uint32_t handle; /* 04-07 */ 745 uint32_t handle; /* 04-07 */
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 912a67494adf..e0c32159749c 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -10,9 +10,42 @@
10#include "ql4_dbg.h" 10#include "ql4_dbg.h"
11#include "ql4_inline.h" 11#include "ql4_inline.h"
12 12
13
14#include <scsi/scsi_tcq.h> 13#include <scsi/scsi_tcq.h>
15 14
15static int
16qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
17{
18 uint16_t cnt;
19
20 /* Calculate number of free request entries. */
21 if ((req_cnt + 2) >= ha->req_q_count) {
22 cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
23 if (ha->request_in < cnt)
24 ha->req_q_count = cnt - ha->request_in;
25 else
26 ha->req_q_count = REQUEST_QUEUE_DEPTH -
27 (ha->request_in - cnt);
28 }
29
30 /* Check if room for request in request ring. */
31 if ((req_cnt + 2) < ha->req_q_count)
32 return 1;
33 else
34 return 0;
35}
36
37static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
38{
39 /* Advance request queue pointer */
40 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
41 ha->request_in = 0;
42 ha->request_ptr = ha->request_ring;
43 } else {
44 ha->request_in++;
45 ha->request_ptr++;
46 }
47}
48
16/** 49/**
17 * qla4xxx_get_req_pkt - returns a valid entry in request queue. 50 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
18 * @ha: Pointer to host adapter structure. 51 * @ha: Pointer to host adapter structure.
@@ -26,35 +59,18 @@
26static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, 59static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
27 struct queue_entry **queue_entry) 60 struct queue_entry **queue_entry)
28{ 61{
29 uint16_t request_in; 62 uint16_t req_cnt = 1;
30 uint8_t status = QLA_SUCCESS;
31
32 *queue_entry = ha->request_ptr;
33 63
34 /* get the latest request_in and request_out index */ 64 if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
35 request_in = ha->request_in; 65 *queue_entry = ha->request_ptr;
36 ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
37
38 /* Advance request queue pointer and check for queue full */
39 if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
40 request_in = 0;
41 ha->request_ptr = ha->request_ring;
42 } else {
43 request_in++;
44 ha->request_ptr++;
45 }
46
47 /* request queue is full, try again later */
48 if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
49 /* restore request pointer */
50 ha->request_ptr = *queue_entry;
51 status = QLA_ERROR;
52 } else {
53 ha->request_in = request_in;
54 memset(*queue_entry, 0, sizeof(**queue_entry)); 66 memset(*queue_entry, 0, sizeof(**queue_entry));
67
68 qla4xxx_advance_req_ring_ptr(ha);
69 ha->req_q_count -= req_cnt;
70 return QLA_SUCCESS;
55 } 71 }
56 72
57 return status; 73 return QLA_ERROR;
58} 74}
59 75
60/** 76/**
@@ -100,21 +116,14 @@ exit_send_marker:
100 return status; 116 return status;
101} 117}
102 118
103static struct continuation_t1_entry* qla4xxx_alloc_cont_entry( 119static struct continuation_t1_entry *
104 struct scsi_qla_host *ha) 120qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
105{ 121{
106 struct continuation_t1_entry *cont_entry; 122 struct continuation_t1_entry *cont_entry;
107 123
108 cont_entry = (struct continuation_t1_entry *)ha->request_ptr; 124 cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
109 125
110 /* Advance request queue pointer */ 126 qla4xxx_advance_req_ring_ptr(ha);
111 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
112 ha->request_in = 0;
113 ha->request_ptr = ha->request_ring;
114 } else {
115 ha->request_in++;
116 ha->request_ptr++;
117 }
118 127
119 /* Load packet defaults */ 128 /* Load packet defaults */
120 cont_entry->hdr.entryType = ET_CONTINUE; 129 cont_entry->hdr.entryType = ET_CONTINUE;
@@ -197,13 +206,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
197 struct scsi_cmnd *cmd = srb->cmd; 206 struct scsi_cmnd *cmd = srb->cmd;
198 struct ddb_entry *ddb_entry; 207 struct ddb_entry *ddb_entry;
199 struct command_t3_entry *cmd_entry; 208 struct command_t3_entry *cmd_entry;
200
201 int nseg; 209 int nseg;
202 uint16_t tot_dsds; 210 uint16_t tot_dsds;
203 uint16_t req_cnt; 211 uint16_t req_cnt;
204
205 unsigned long flags; 212 unsigned long flags;
206 uint16_t cnt;
207 uint32_t index; 213 uint32_t index;
208 char tag[2]; 214 char tag[2];
209 215
@@ -217,6 +223,19 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
217 223
218 index = (uint32_t)cmd->request->tag; 224 index = (uint32_t)cmd->request->tag;
219 225
226 /*
227 * Check to see if adapter is online before placing request on
228 * request queue. If a reset occurs and a request is in the queue,
229 * the firmware will still attempt to process the request, retrieving
230 * garbage for pointers.
231 */
232 if (!test_bit(AF_ONLINE, &ha->flags)) {
233 DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
234 "Do not issue command.\n",
235 ha->host_no, __func__));
236 goto queuing_error;
237 }
238
220 /* Calculate the number of request entries needed. */ 239 /* Calculate the number of request entries needed. */
221 nseg = scsi_dma_map(cmd); 240 nseg = scsi_dma_map(cmd);
222 if (nseg < 0) 241 if (nseg < 0)
@@ -224,17 +243,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
224 tot_dsds = nseg; 243 tot_dsds = nseg;
225 244
226 req_cnt = qla4xxx_calc_request_entries(tot_dsds); 245 req_cnt = qla4xxx_calc_request_entries(tot_dsds);
227 246 if (!qla4xxx_space_in_req_ring(ha, req_cnt))
228 if (ha->req_q_count < (req_cnt + 2)) {
229 cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
230 if (ha->request_in < cnt)
231 ha->req_q_count = cnt - ha->request_in;
232 else
233 ha->req_q_count = REQUEST_QUEUE_DEPTH -
234 (ha->request_in - cnt);
235 }
236
237 if (ha->req_q_count < (req_cnt + 2))
238 goto queuing_error; 247 goto queuing_error;
239 248
240 /* total iocbs active */ 249 /* total iocbs active */
@@ -286,32 +295,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
286 break; 295 break;
287 } 296 }
288 297
289 298 qla4xxx_advance_req_ring_ptr(ha);
290 /* Advance request queue pointer */
291 ha->request_in++;
292 if (ha->request_in == REQUEST_QUEUE_DEPTH) {
293 ha->request_in = 0;
294 ha->request_ptr = ha->request_ring;
295 } else
296 ha->request_ptr++;
297
298
299 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); 299 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
300 wmb(); 300 wmb();
301 301
302 /*
303 * Check to see if adapter is online before placing request on
304 * request queue. If a reset occurs and a request is in the queue,
305 * the firmware will still attempt to process the request, retrieving
306 * garbage for pointers.
307 */
308 if (!test_bit(AF_ONLINE, &ha->flags)) {
309 DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
310 "Do not issue command.\n",
311 ha->host_no, __func__));
312 goto queuing_error;
313 }
314
315 srb->cmd->host_scribble = (unsigned char *)srb; 302 srb->cmd->host_scribble = (unsigned char *)srb;
316 303
317 /* update counters */ 304 /* update counters */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 799120fcb9be..8025ee16588e 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -11,6 +11,98 @@
11#include "ql4_inline.h" 11#include "ql4_inline.h"
12 12
13/** 13/**
14 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
15 * @ha: Pointer to host adapter structure.
16 * @sts_entry: Pointer to status entry structure.
17 * @srb: Pointer to srb structure.
18 **/
19static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
20 struct status_entry *sts_entry,
21 struct srb *srb)
22{
23 struct scsi_cmnd *cmd = srb->cmd;
24 uint16_t sense_len;
25
26 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
27 sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
28 if (sense_len == 0)
29 return;
30
31 /* Save total available sense length,
32 * not to exceed cmd's sense buffer size */
33 sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
34 srb->req_sense_ptr = cmd->sense_buffer;
35 srb->req_sense_len = sense_len;
36
37 /* Copy sense from sts_entry pkt */
38 sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
39 memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);
40
41 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
42 "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
43 cmd->device->channel, cmd->device->id,
44 cmd->device->lun, __func__,
45 sts_entry->senseData[2] & 0x0f,
46 sts_entry->senseData[7],
47 sts_entry->senseData[12],
48 sts_entry->senseData[13]));
49
50 DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
51 srb->flags |= SRB_GOT_SENSE;
52
53 /* Update srb, in case a sts_cont pkt follows */
54 srb->req_sense_ptr += sense_len;
55 srb->req_sense_len -= sense_len;
56 if (srb->req_sense_len != 0)
57 ha->status_srb = srb;
58 else
59 ha->status_srb = NULL;
60}
61
62/**
63 * qla4xxx_status_cont_entry - Process a Status Continuations entry.
64 * @ha: SCSI driver HA context
65 * @sts_cont: Entry pointer
66 *
67 * Extended sense data.
68 */
69static void
70qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
71 struct status_cont_entry *sts_cont)
72{
73 struct srb *srb = ha->status_srb;
74 struct scsi_cmnd *cmd;
75 uint8_t sense_len;
76
77 if (srb == NULL)
78 return;
79
80 cmd = srb->cmd;
81 if (cmd == NULL) {
82 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
83 "back to OS srb=%p srb->state:%d\n", ha->host_no,
84 __func__, srb, srb->state));
85 ha->status_srb = NULL;
86 return;
87 }
88
89 /* Copy sense data. */
90 sense_len = min_t(uint16_t, srb->req_sense_len,
91 IOCB_MAX_EXT_SENSEDATA_LEN);
92 memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
93 DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));
94
95 srb->req_sense_ptr += sense_len;
96 srb->req_sense_len -= sense_len;
97
98 /* Place command on done queue. */
99 if (srb->req_sense_len == 0) {
100 qla4xxx_srb_compl(ha, srb);
101 ha->status_srb = NULL;
102 }
103}
104
105/**
14 * qla4xxx_status_entry - processes status IOCBs 106 * qla4xxx_status_entry - processes status IOCBs
15 * @ha: Pointer to host adapter structure. 107 * @ha: Pointer to host adapter structure.
16 * @sts_entry: Pointer to status entry structure. 108 * @sts_entry: Pointer to status entry structure.
@@ -23,7 +115,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
23 struct srb *srb; 115 struct srb *srb;
24 struct ddb_entry *ddb_entry; 116 struct ddb_entry *ddb_entry;
25 uint32_t residual; 117 uint32_t residual;
26 uint16_t sensebytecnt;
27 118
28 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); 119 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
29 if (!srb) { 120 if (!srb) {
@@ -92,24 +183,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
92 break; 183 break;
93 184
94 /* Copy Sense Data into sense buffer. */ 185 /* Copy Sense Data into sense buffer. */
95 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 186 qla4xxx_copy_sense(ha, sts_entry, srb);
96
97 sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
98 if (sensebytecnt == 0)
99 break;
100
101 memcpy(cmd->sense_buffer, sts_entry->senseData,
102 min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
103
104 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
105 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
106 cmd->device->channel, cmd->device->id,
107 cmd->device->lun, __func__,
108 sts_entry->senseData[2] & 0x0f,
109 sts_entry->senseData[12],
110 sts_entry->senseData[13]));
111
112 srb->flags |= SRB_GOT_SENSE;
113 break; 187 break;
114 188
115 case SCS_INCOMPLETE: 189 case SCS_INCOMPLETE:
@@ -176,23 +250,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
176 break; 250 break;
177 251
178 /* Copy Sense Data into sense buffer. */ 252 /* Copy Sense Data into sense buffer. */
179 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 253 qla4xxx_copy_sense(ha, sts_entry, srb);
180
181 sensebytecnt =
182 le16_to_cpu(sts_entry->senseDataByteCnt);
183 if (sensebytecnt == 0)
184 break;
185
186 memcpy(cmd->sense_buffer, sts_entry->senseData,
187 min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
188
189 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
190 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
191 cmd->device->channel, cmd->device->id,
192 cmd->device->lun, __func__,
193 sts_entry->senseData[2] & 0x0f,
194 sts_entry->senseData[12],
195 sts_entry->senseData[13]));
196 } else { 254 } else {
197 /* 255 /*
198 * If RISC reports underrun and target does not 256 * If RISC reports underrun and target does not
@@ -268,9 +326,10 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
268 326
269status_entry_exit: 327status_entry_exit:
270 328
271 /* complete the request */ 329 /* complete the request, if not waiting for status_continuation pkt */
272 srb->cc_stat = sts_entry->completionStatus; 330 srb->cc_stat = sts_entry->completionStatus;
273 qla4xxx_srb_compl(ha, srb); 331 if (ha->status_srb == NULL)
332 qla4xxx_srb_compl(ha, srb);
274} 333}
275 334
276/** 335/**
@@ -305,10 +364,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
305 /* process entry */ 364 /* process entry */
306 switch (sts_entry->hdr.entryType) { 365 switch (sts_entry->hdr.entryType) {
307 case ET_STATUS: 366 case ET_STATUS:
308 /* 367 /* Common status */
309 * Common status - Single completion posted in single
310 * IOSB.
311 */
312 qla4xxx_status_entry(ha, sts_entry); 368 qla4xxx_status_entry(ha, sts_entry);
313 break; 369 break;
314 370
@@ -316,9 +372,8 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
316 break; 372 break;
317 373
318 case ET_STATUS_CONTINUATION: 374 case ET_STATUS_CONTINUATION:
319 /* Just throw away the status continuation entries */ 375 qla4xxx_status_cont_entry(ha,
320 DEBUG2(printk("scsi%ld: %s: Status Continuation entry " 376 (struct status_cont_entry *) sts_entry);
321 "- ignoring\n", ha->host_no, __func__));
322 break; 377 break;
323 378
324 case ET_COMMAND: 379 case ET_COMMAND:
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 051b0f5e8c8e..09d6d4b76f39 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -385,16 +385,6 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
385 mbox_sts[0])); 385 mbox_sts[0]));
386 return QLA_ERROR; 386 return QLA_ERROR;
387 } 387 }
388
389 /* High-water mark of IOCBs */
390 ha->iocb_hiwat = mbox_sts[2];
391 if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
392 ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
393 else
394 dev_info(&ha->pdev->dev, "WARNING!!! You have less than %d "
395 "firmware IOCBs available (%d).\n",
396 IOCB_HIWAT_CUSHION, ha->iocb_hiwat);
397
398 return QLA_SUCCESS; 388 return QLA_SUCCESS;
399} 389}
400 390
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ec9da6ce8489..40e3cafb3a9c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -66,6 +66,7 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
66static int qla4xxx_host_get_param(struct Scsi_Host *shost, 66static int qla4xxx_host_get_param(struct Scsi_Host *shost,
67 enum iscsi_host_param param, char *buf); 67 enum iscsi_host_param param, char *buf);
68static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); 68static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
69static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
69 70
70/* 71/*
71 * SCSI host template entry points 72 * SCSI host template entry points
@@ -89,6 +90,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
89 .eh_device_reset_handler = qla4xxx_eh_device_reset, 90 .eh_device_reset_handler = qla4xxx_eh_device_reset,
90 .eh_target_reset_handler = qla4xxx_eh_target_reset, 91 .eh_target_reset_handler = qla4xxx_eh_target_reset,
91 .eh_host_reset_handler = qla4xxx_eh_host_reset, 92 .eh_host_reset_handler = qla4xxx_eh_host_reset,
93 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
92 94
93 .slave_configure = qla4xxx_slave_configure, 95 .slave_configure = qla4xxx_slave_configure,
94 .slave_alloc = qla4xxx_slave_alloc, 96 .slave_alloc = qla4xxx_slave_alloc,
@@ -124,6 +126,21 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
124 126
125static struct scsi_transport_template *qla4xxx_scsi_transport; 127static struct scsi_transport_template *qla4xxx_scsi_transport;
126 128
129static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
130{
131 struct iscsi_cls_session *session;
132 struct ddb_entry *ddb_entry;
133
134 session = starget_to_session(scsi_target(sc->device));
135 ddb_entry = session->dd_data;
136
137 /* if we are not logged in then the LLD is going to clean up the cmd */
138 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
139 return BLK_EH_RESET_TIMER;
140 else
141 return BLK_EH_NOT_HANDLED;
142}
143
127static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) 144static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
128{ 145{
129 struct ddb_entry *ddb_entry = session->dd_data; 146 struct ddb_entry *ddb_entry = session->dd_data;
@@ -904,18 +921,17 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
904 /* Flush any pending ddb changed AENs */ 921 /* Flush any pending ddb changed AENs */
905 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 922 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
906 923
924 qla4xxx_flush_active_srbs(ha);
925
907 /* Reset the firmware. If successful, function 926 /* Reset the firmware. If successful, function
908 * returns with ISP interrupts enabled. 927 * returns with ISP interrupts enabled.
909 */ 928 */
910 if (status == QLA_SUCCESS) { 929 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
911 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", 930 ha->host_no, __func__));
912 ha->host_no, __func__)); 931 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
913 qla4xxx_flush_active_srbs(ha); 932 status = qla4xxx_soft_reset(ha);
914 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) 933 else
915 status = qla4xxx_soft_reset(ha); 934 status = QLA_ERROR;
916 else
917 status = QLA_ERROR;
918 }
919 935
920 /* Flush any pending ddb changed AENs */ 936 /* Flush any pending ddb changed AENs */
921 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 937 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
@@ -1527,11 +1543,9 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1527{ 1543{
1528 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 1544 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
1529 struct ddb_entry *ddb_entry = cmd->device->hostdata; 1545 struct ddb_entry *ddb_entry = cmd->device->hostdata;
1530 struct srb *sp;
1531 int ret = FAILED, stat; 1546 int ret = FAILED, stat;
1532 1547
1533 sp = (struct srb *) cmd->SCp.ptr; 1548 if (!ddb_entry)
1534 if (!sp || !ddb_entry)
1535 return ret; 1549 return ret;
1536 1550
1537 dev_info(&ha->pdev->dev, 1551 dev_info(&ha->pdev->dev,
@@ -1644,7 +1658,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1644 ha = (struct scsi_qla_host *) cmd->device->host->hostdata; 1658 ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
1645 1659
1646 dev_info(&ha->pdev->dev, 1660 dev_info(&ha->pdev->dev,
1647 "scsi(%ld:%d:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, 1661 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
1648 cmd->device->channel, cmd->device->id, cmd->device->lun); 1662 cmd->device->channel, cmd->device->id, cmd->device->lun);
1649 1663
1650 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 1664 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index ab984cb89cea..6980cb279c81 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,5 +5,5 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.01.00-k8" 8#define QLA4XXX_DRIVER_VERSION "5.01.00-k9"
9 9
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2eee9e6e4fe8..292c02f810d0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3670,13 +3670,14 @@ static void
3670fc_bsg_goose_queue(struct fc_rport *rport) 3670fc_bsg_goose_queue(struct fc_rport *rport)
3671{ 3671{
3672 int flagset; 3672 int flagset;
3673 unsigned long flags;
3673 3674
3674 if (!rport->rqst_q) 3675 if (!rport->rqst_q)
3675 return; 3676 return;
3676 3677
3677 get_device(&rport->dev); 3678 get_device(&rport->dev);
3678 3679
3679 spin_lock(rport->rqst_q->queue_lock); 3680 spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
3680 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && 3681 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
3681 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3682 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3682 if (flagset) 3683 if (flagset)
@@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
3684 __blk_run_queue(rport->rqst_q); 3685 __blk_run_queue(rport->rqst_q);
3685 if (flagset) 3686 if (flagset)
3686 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3687 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3687 spin_unlock(rport->rqst_q->queue_lock); 3688 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
3688 3689
3689 put_device(&rport->dev); 3690 put_device(&rport->dev);
3690} 3691}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 783e33c65eb7..b47240ca4b19 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -990,7 +990,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
990 struct iscsi_uevent *ev; 990 struct iscsi_uevent *ev;
991 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 991 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
992 992
993 skb = alloc_skb(len, GFP_NOIO); 993 skb = alloc_skb(len, GFP_ATOMIC);
994 if (!skb) { 994 if (!skb) {
995 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); 995 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
996 return -ENOMEM; 996 return -ENOMEM;
@@ -1012,7 +1012,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
1012 1012
1013 memcpy((char *)ev + sizeof(*ev), data, data_size); 1013 memcpy((char *)ev + sizeof(*ev), data, data_size);
1014 1014
1015 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO); 1015 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC);
1016} 1016}
1017EXPORT_SYMBOL_GPL(iscsi_offload_mesg); 1017EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
1018 1018
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5616cd780ff3..b7b9fec67a98 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1840,6 +1840,18 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
1840 kfree(buffer); 1840 kfree(buffer);
1841} 1841}
1842 1842
1843static int sd_try_extended_inquiry(struct scsi_device *sdp)
1844{
1845 /*
1846 * Although VPD inquiries can go to SCSI-2 type devices,
1847 * some USB ones crash on receiving them, and the pages
1848 * we currently ask for are for SPC-3 and beyond
1849 */
1850 if (sdp->scsi_level > SCSI_SPC_2)
1851 return 1;
1852 return 0;
1853}
1854
1843/** 1855/**
1844 * sd_revalidate_disk - called the first time a new disk is seen, 1856 * sd_revalidate_disk - called the first time a new disk is seen,
1845 * performs disk spin up, read_capacity, etc. 1857 * performs disk spin up, read_capacity, etc.
@@ -1877,8 +1889,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
1877 */ 1889 */
1878 if (sdkp->media_present) { 1890 if (sdkp->media_present) {
1879 sd_read_capacity(sdkp, buffer); 1891 sd_read_capacity(sdkp, buffer);
1880 sd_read_block_limits(sdkp); 1892
1881 sd_read_block_characteristics(sdkp); 1893 if (sd_try_extended_inquiry(sdp)) {
1894 sd_read_block_limits(sdkp);
1895 sd_read_block_characteristics(sdkp);
1896 }
1897
1882 sd_read_write_protect_flag(sdkp, buffer); 1898 sd_read_write_protect_flag(sdkp, buffer);
1883 sd_read_cache_type(sdkp, buffer); 1899 sd_read_cache_type(sdkp, buffer);
1884 sd_read_app_tag_own(sdkp, buffer); 1900 sd_read_app_tag_own(sdkp, buffer);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8201387b4daa..9230402c45af 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp);
210static int sg_allow_access(struct file *filp, unsigned char *cmd) 210static int sg_allow_access(struct file *filp, unsigned char *cmd)
211{ 211{
212 struct sg_fd *sfp = (struct sg_fd *)filp->private_data; 212 struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
213 struct request_queue *q = sfp->parentdp->device->request_queue;
214 213
215 if (sfp->parentdp->device->type == TYPE_SCANNER) 214 if (sfp->parentdp->device->type == TYPE_SCANNER)
216 return 0; 215 return 0;
217 216
218 return blk_verify_command(&q->cmd_filter, 217 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
219 cmd, filp->f_mode & FMODE_WRITE);
220} 218}
221 219
222static int 220static int
@@ -621,7 +619,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
621 if (strcmp(current->comm, cmd) && printk_ratelimit()) { 619 if (strcmp(current->comm, cmd) && printk_ratelimit()) {
622 printk(KERN_WARNING 620 printk(KERN_WARNING
623 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--" 621 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
624 "guessing data in;\n" KERN_WARNING " " 622 "guessing data in;\n "
625 "program %s not setting count and/or reply_len properly\n", 623 "program %s not setting count and/or reply_len properly\n",
626 old_hdr.reply_len - (int)SZ_SG_HEADER, 624 old_hdr.reply_len - (int)SZ_SG_HEADER,
627 input_size, (unsigned int) cmnd[0], 625 input_size, (unsigned int) cmnd[0],
@@ -1658,6 +1656,10 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1658 md->nr_entries = req_schp->k_use_sg; 1656 md->nr_entries = req_schp->k_use_sg;
1659 md->offset = 0; 1657 md->offset = 0;
1660 md->null_mapped = hp->dxferp ? 0 : 1; 1658 md->null_mapped = hp->dxferp ? 0 : 1;
1659 if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
1660 md->from_user = 1;
1661 else
1662 md->from_user = 0;
1661 } 1663 }
1662 1664
1663 if (iov_count) { 1665 if (iov_count) {
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index bcaba86060ab..75da6e58ce55 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -2860,8 +2860,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
2860 */ 2860 */
2861 2861
2862 local_irq_restore(flags); 2862 local_irq_restore(flags);
2863 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n" 2863 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
2864 KERN_INFO " before abortion\n", HOSTNO);
2865 2864
2866 return SCSI_ABORT_NOT_RUNNING; 2865 return SCSI_ABORT_NOT_RUNNING;
2867} 2866}
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 97f3158fa7b5..27e84e4b1fa9 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev)
134 134
135 host = ncr_attach(&zalon7xx_template, unit, &device); 135 host = ncr_attach(&zalon7xx_template, unit, &device);
136 if (!host) 136 if (!host)
137 goto fail; 137 return -ENODEV;
138 138
139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { 139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
140 dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", 140 dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index a07015d646dd..e7108e75653d 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -60,11 +60,12 @@ struct serial_private {
60 60
61static void moan_device(const char *str, struct pci_dev *dev) 61static void moan_device(const char *str, struct pci_dev *dev)
62{ 62{
63 printk(KERN_WARNING "%s: %s\n" 63 printk(KERN_WARNING
64 KERN_WARNING "Please send the output of lspci -vv, this\n" 64 "%s: %s\n"
65 KERN_WARNING "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" 65 "Please send the output of lspci -vv, this\n"
66 KERN_WARNING "manufacturer and name of serial board or\n" 66 "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
67 KERN_WARNING "modem board to rmk+serial@arm.linux.org.uk.\n", 67 "manufacturer and name of serial board or\n"
68 "modem board to rmk+serial@arm.linux.org.uk.\n",
68 pci_name(dev), str, dev->vendor, dev->device, 69 pci_name(dev), str, dev->vendor, dev->device,
69 dev->subsystem_vendor, dev->subsystem_device); 70 dev->subsystem_vendor, dev->subsystem_device);
70} 71}
@@ -759,6 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev)
759 /* subdevice 0x00PS means <P> parallel, <S> serial */ 760 /* subdevice 0x00PS means <P> parallel, <S> serial */
760 unsigned int num_serial = dev->subsystem_device & 0xf; 761 unsigned int num_serial = dev->subsystem_device & 0xf;
761 762
763 if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
764 return 0;
762 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && 765 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
763 dev->subsystem_device == 0x0299) 766 dev->subsystem_device == 0x0299)
764 return 0; 767 return 0;
@@ -3557,6 +3560,10 @@ static struct pci_device_id serial_pci_tbl[] = {
3557 PCI_VENDOR_ID_IBM, 0x0299, 3560 PCI_VENDOR_ID_IBM, 0x0299,
3558 0, 0, pbn_b0_bt_2_115200 }, 3561 0, 0, pbn_b0_bt_2_115200 },
3559 3562
3563 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
3564 0xA000, 0x1000,
3565 0, 0, pbn_b0_1_115200 },
3566
3560 /* 3567 /*
3561 * These entries match devices with class COMMUNICATION_SERIAL, 3568 * These entries match devices with class COMMUNICATION_SERIAL,
3562 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL 3569 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
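
The moan_device() hunk above, like the sun3_NCR5380 and sg_write() hunks earlier (and the ssb_printk ones further down), drops KERN_* markers from string continuation lines. A log level is only meaningful at the very start of a printk() format; embedded further in, the "<4>" marker typically ends up as literal text in the log. A short sketch of the wrong and right forms (hypothetical message and address):

#include <linux/kernel.h>

static void demo_log(int vendor, int device)
{
        /* Wrong: the second KERN_WARNING is not a level, it is literal text. */
        /* printk(KERN_WARNING "first line\n" KERN_WARNING "second line\n"); */

        /* Right: one level for the whole message, '\n' for the line breaks. */
        printk(KERN_WARNING
               "demo %04x:%04x misbehaving\n"
               "please report this to the (hypothetical) demo@example.org list\n",
               vendor, device);
}
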
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 338b15c0a548..607d43a31048 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1551,6 +1551,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1551 if (ret) 1551 if (ret)
1552 goto err_add_port; 1552 goto err_add_port;
1553 1553
1554#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
1554 if (atmel_is_console_port(&port->uart) 1555 if (atmel_is_console_port(&port->uart)
1555 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { 1556 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
1556 /* 1557 /*
@@ -1559,6 +1560,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1559 */ 1560 */
1560 clk_disable(port->clk); 1561 clk_disable(port->clk);
1561 } 1562 }
1563#endif
1562 1564
1563 device_init_wakeup(&pdev->dev, 1); 1565 device_init_wakeup(&pdev->dev, 1);
1564 platform_set_drvdata(pdev, port); 1566 platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 34b4ae0fe760..c108b1a0ce98 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -236,7 +236,6 @@ static int sport_startup(struct uart_port *port)
236 int retval; 236 int retval;
237 237
238 pr_debug("%s enter\n", __func__); 238 pr_debug("%s enter\n", __func__);
239 memset(buffer, 20, '\0');
240 snprintf(buffer, 20, "%s rx", up->name); 239 snprintf(buffer, 20, "%s rx", up->name);
241 retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); 240 retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
242 if (retval) { 241 if (retval) {
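
The memset() removed above had its arguments transposed: memset(buffer, 20, '\0') fills zero bytes with the value 20, i.e. it does nothing, and the following snprintf() NUL-terminates the buffer anyway, so the call can simply go. For reference, the pitfall in plain C (illustrative buffer and string only):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buffer[20];

        memset(buffer, 20, '\0');     /* BUG: fills '\0' == 0 bytes with 20  */
        memset(buffer, '\0', 20);     /* intended: clear 20 bytes to zero    */
        snprintf(buffer, sizeof(buffer), "%s rx", "demo"); /* NUL-terminated */
        printf("%s\n", buffer);
        return 0;
}
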
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index 141c0a3333ad..a9802e76b5fa 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -132,7 +132,7 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
132 memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + 132 memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
133 L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); 133 L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
134 if (is_con) { 134 if (is_con) {
135 mem_addr = alloc_bootmem(memsz); 135 mem_addr = kzalloc(memsz, GFP_NOWAIT);
136 dma_addr = virt_to_bus(mem_addr); 136 dma_addr = virt_to_bus(mem_addr);
137 } 137 }
138 else 138 else
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
index 698048f64f5e..f7c24baa1416 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/serial/msm_serial.c
@@ -730,7 +730,6 @@ static int __devexit msm_serial_remove(struct platform_device *pdev)
730} 730}
731 731
732static struct platform_driver msm_platform_driver = { 732static struct platform_driver msm_platform_driver = {
733 .probe = msm_serial_probe,
734 .remove = msm_serial_remove, 733 .remove = msm_serial_remove,
735 .driver = { 734 .driver = {
736 .name = "msm_serial", 735 .name = "msm_serial",
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
index fb00ed5296e6..fed1a9a1ffb4 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/serial/s3c2400.c
@@ -76,7 +76,7 @@ static int s3c2400_serial_probe(struct platform_device *dev)
76 return s3c24xx_serial_probe(dev, &s3c2400_uart_inf); 76 return s3c24xx_serial_probe(dev, &s3c2400_uart_inf);
77} 77}
78 78
79static struct platform_driver s3c2400_serial_drv = { 79static struct platform_driver s3c2400_serial_driver = {
80 .probe = s3c2400_serial_probe, 80 .probe = s3c2400_serial_probe,
81 .remove = __devexit_p(s3c24xx_serial_remove), 81 .remove = __devexit_p(s3c24xx_serial_remove),
82 .driver = { 82 .driver = {
@@ -85,16 +85,16 @@ static struct platform_driver s3c2400_serial_drv = {
85 }, 85 },
86}; 86};
87 87
88s3c24xx_console_init(&s3c2400_serial_drv, &s3c2400_uart_inf); 88s3c24xx_console_init(&s3c2400_serial_driver, &s3c2400_uart_inf);
89 89
90static inline int s3c2400_serial_init(void) 90static inline int s3c2400_serial_init(void)
91{ 91{
92 return s3c24xx_serial_init(&s3c2400_serial_drv, &s3c2400_uart_inf); 92 return s3c24xx_serial_init(&s3c2400_serial_driver, &s3c2400_uart_inf);
93} 93}
94 94
95static inline void s3c2400_serial_exit(void) 95static inline void s3c2400_serial_exit(void)
96{ 96{
97 platform_driver_unregister(&s3c2400_serial_drv); 97 platform_driver_unregister(&s3c2400_serial_driver);
98} 98}
99 99
100module_init(s3c2400_serial_init); 100module_init(s3c2400_serial_init);
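
This and the following s3c24xx/s3c64xx hunks contain only the rename from *_serial_drv to *_serial_driver, keeping every variant on the same naming convention. The registration pattern each of these small per-SoC files follows is standard platform_driver boilerplate, sketched here with a hypothetical SoC name (bodies elided):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_serial_probe(struct platform_device *dev)
{
        /* a real variant would call into the common s3c24xx core here */
        return 0;
}

static struct platform_driver demo_serial_driver = {
        .probe  = demo_serial_probe,
        .driver = {
                .name  = "demo-uart",
                .owner = THIS_MODULE,
        },
};

static int __init demo_serial_init(void)
{
        return platform_driver_register(&demo_serial_driver);
}

static void __exit demo_serial_exit(void)
{
        platform_driver_unregister(&demo_serial_driver);
}

module_init(demo_serial_init);
module_exit(demo_serial_exit);
MODULE_LICENSE("GPL");
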
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index b5d7cbcba2ae..c99f0821cae3 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -88,7 +88,7 @@ static int s3c2410_serial_probe(struct platform_device *dev)
88 return s3c24xx_serial_probe(dev, &s3c2410_uart_inf); 88 return s3c24xx_serial_probe(dev, &s3c2410_uart_inf);
89} 89}
90 90
91static struct platform_driver s3c2410_serial_drv = { 91static struct platform_driver s3c2410_serial_driver = {
92 .probe = s3c2410_serial_probe, 92 .probe = s3c2410_serial_probe,
93 .remove = __devexit_p(s3c24xx_serial_remove), 93 .remove = __devexit_p(s3c24xx_serial_remove),
94 .driver = { 94 .driver = {
@@ -97,16 +97,16 @@ static struct platform_driver s3c2410_serial_drv = {
97 }, 97 },
98}; 98};
99 99
100s3c24xx_console_init(&s3c2410_serial_drv, &s3c2410_uart_inf); 100s3c24xx_console_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
101 101
102static int __init s3c2410_serial_init(void) 102static int __init s3c2410_serial_init(void)
103{ 103{
104 return s3c24xx_serial_init(&s3c2410_serial_drv, &s3c2410_uart_inf); 104 return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
105} 105}
106 106
107static void __exit s3c2410_serial_exit(void) 107static void __exit s3c2410_serial_exit(void)
108{ 108{
109 platform_driver_unregister(&s3c2410_serial_drv); 109 platform_driver_unregister(&s3c2410_serial_driver);
110} 110}
111 111
112module_init(s3c2410_serial_init); 112module_init(s3c2410_serial_init);
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
index 11dcb90bdfef..6e057d8809d3 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/serial/s3c2412.c
@@ -121,7 +121,7 @@ static int s3c2412_serial_probe(struct platform_device *dev)
121 return s3c24xx_serial_probe(dev, &s3c2412_uart_inf); 121 return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
122} 122}
123 123
124static struct platform_driver s3c2412_serial_drv = { 124static struct platform_driver s3c2412_serial_driver = {
125 .probe = s3c2412_serial_probe, 125 .probe = s3c2412_serial_probe,
126 .remove = __devexit_p(s3c24xx_serial_remove), 126 .remove = __devexit_p(s3c24xx_serial_remove),
127 .driver = { 127 .driver = {
@@ -130,16 +130,16 @@ static struct platform_driver s3c2412_serial_drv = {
130 }, 130 },
131}; 131};
132 132
133s3c24xx_console_init(&s3c2412_serial_drv, &s3c2412_uart_inf); 133s3c24xx_console_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
134 134
135static inline int s3c2412_serial_init(void) 135static inline int s3c2412_serial_init(void)
136{ 136{
137 return s3c24xx_serial_init(&s3c2412_serial_drv, &s3c2412_uart_inf); 137 return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
138} 138}
139 139
140static inline void s3c2412_serial_exit(void) 140static inline void s3c2412_serial_exit(void)
141{ 141{
142 platform_driver_unregister(&s3c2412_serial_drv); 142 platform_driver_unregister(&s3c2412_serial_driver);
143} 143}
144 144
145module_init(s3c2412_serial_init); 145module_init(s3c2412_serial_init);
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 06c5b0cc47a3..69ff5d340f04 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -151,7 +151,7 @@ static int s3c2440_serial_probe(struct platform_device *dev)
151 return s3c24xx_serial_probe(dev, &s3c2440_uart_inf); 151 return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
152} 152}
153 153
154static struct platform_driver s3c2440_serial_drv = { 154static struct platform_driver s3c2440_serial_driver = {
155 .probe = s3c2440_serial_probe, 155 .probe = s3c2440_serial_probe,
156 .remove = __devexit_p(s3c24xx_serial_remove), 156 .remove = __devexit_p(s3c24xx_serial_remove),
157 .driver = { 157 .driver = {
@@ -160,16 +160,16 @@ static struct platform_driver s3c2440_serial_drv = {
160 }, 160 },
161}; 161};
162 162
163s3c24xx_console_init(&s3c2440_serial_drv, &s3c2440_uart_inf); 163s3c24xx_console_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
164 164
165static int __init s3c2440_serial_init(void) 165static int __init s3c2440_serial_init(void)
166{ 166{
167 return s3c24xx_serial_init(&s3c2440_serial_drv, &s3c2440_uart_inf); 167 return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
168} 168}
169 169
170static void __exit s3c2440_serial_exit(void) 170static void __exit s3c2440_serial_exit(void)
171{ 171{
172 platform_driver_unregister(&s3c2440_serial_drv); 172 platform_driver_unregister(&s3c2440_serial_driver);
173} 173}
174 174
175module_init(s3c2440_serial_init); 175module_init(s3c2440_serial_init);
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c
index 786a067d62ac..26c49e18bdd1 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/serial/s3c24a0.c
@@ -92,7 +92,7 @@ static int s3c24a0_serial_probe(struct platform_device *dev)
92 return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf); 92 return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf);
93} 93}
94 94
95static struct platform_driver s3c24a0_serial_drv = { 95static struct platform_driver s3c24a0_serial_driver = {
96 .probe = s3c24a0_serial_probe, 96 .probe = s3c24a0_serial_probe,
97 .remove = __devexit_p(s3c24xx_serial_remove), 97 .remove = __devexit_p(s3c24xx_serial_remove),
98 .driver = { 98 .driver = {
@@ -101,16 +101,16 @@ static struct platform_driver s3c24a0_serial_drv = {
101 }, 101 },
102}; 102};
103 103
104s3c24xx_console_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf); 104s3c24xx_console_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf);
105 105
106static int __init s3c24a0_serial_init(void) 106static int __init s3c24a0_serial_init(void)
107{ 107{
108 return s3c24xx_serial_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf); 108 return s3c24xx_serial_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf);
109} 109}
110 110
111static void __exit s3c24a0_serial_exit(void) 111static void __exit s3c24a0_serial_exit(void)
112{ 112{
113 platform_driver_unregister(&s3c24a0_serial_drv); 113 platform_driver_unregister(&s3c24a0_serial_driver);
114} 114}
115 115
116module_init(s3c24a0_serial_init); 116module_init(s3c24a0_serial_init);
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c
index 48f1a3781f0d..4be92ab50058 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/serial/s3c6400.c
@@ -122,7 +122,7 @@ static int s3c6400_serial_probe(struct platform_device *dev)
122 return s3c24xx_serial_probe(dev, &s3c6400_uart_inf); 122 return s3c24xx_serial_probe(dev, &s3c6400_uart_inf);
123} 123}
124 124
125static struct platform_driver s3c6400_serial_drv = { 125static struct platform_driver s3c6400_serial_driver = {
126 .probe = s3c6400_serial_probe, 126 .probe = s3c6400_serial_probe,
127 .remove = __devexit_p(s3c24xx_serial_remove), 127 .remove = __devexit_p(s3c24xx_serial_remove),
128 .driver = { 128 .driver = {
@@ -131,16 +131,16 @@ static struct platform_driver s3c6400_serial_drv = {
131 }, 131 },
132}; 132};
133 133
134s3c24xx_console_init(&s3c6400_serial_drv, &s3c6400_uart_inf); 134s3c24xx_console_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
135 135
136static int __init s3c6400_serial_init(void) 136static int __init s3c6400_serial_init(void)
137{ 137{
138 return s3c24xx_serial_init(&s3c6400_serial_drv, &s3c6400_uart_inf); 138 return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
139} 139}
140 140
141static void __exit s3c6400_serial_exit(void) 141static void __exit s3c6400_serial_exit(void)
142{ 142{
143 platform_driver_unregister(&s3c6400_serial_drv); 143 platform_driver_unregister(&s3c6400_serial_driver);
144} 144}
145 145
146module_init(s3c6400_serial_init); 146module_init(s3c6400_serial_init);
diff --git a/drivers/serial/serial_ks8695.c b/drivers/serial/serial_ks8695.c
index 998e89dc5aaf..e0665630e4da 100644
--- a/drivers/serial/serial_ks8695.c
+++ b/drivers/serial/serial_ks8695.c
@@ -549,7 +549,7 @@ static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = {
549 .mapbase = KS8695_UART_VA, 549 .mapbase = KS8695_UART_VA,
550 .iotype = SERIAL_IO_MEM, 550 .iotype = SERIAL_IO_MEM,
551 .irq = KS8695_IRQ_UART_TX, 551 .irq = KS8695_IRQ_UART_TX,
552 .uartclk = CLOCK_TICK_RATE * 16, 552 .uartclk = KS8695_CLOCK_RATE * 16,
553 .fifosize = 16, 553 .fifosize = 16,
554 .ops = &ks8695uart_pops, 554 .ops = &ks8695uart_pops,
555 .flags = ASYNC_BOOT_AUTOCONF, 555 .flags = ASYNC_BOOT_AUTOCONF,
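
The ks8695 fix replaces the generic CLOCK_TICK_RATE with the board's own KS8695_CLOCK_RATE. Since .uartclk here is the input clock times 16, the usual 8250-style divisor reduces to clock / baud; a quick arithmetic check with a made-up clock value (the real KS8695 rate is not shown in this hunk):

#include <stdio.h>

int main(void)
{
        unsigned long clock_hz = 25000000;              /* illustrative clock */
        unsigned long uartclk  = clock_hz * 16;         /* as in the hunk     */
        unsigned long baud     = 115200;
        unsigned long divisor  = uartclk / (16 * baud); /* == clock_hz / baud */

        printf("divisor = %lu\n", divisor);             /* 25000000/115200 -> 217 */
        return 0;
}
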
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 66f52674ca0c..8e2feb563347 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -707,24 +707,25 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
707 707
708static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) 708static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
709{ 709{
710 unsigned short ssr_status, scr_status; 710 unsigned short ssr_status, scr_status, err_enabled;
711 struct uart_port *port = ptr; 711 struct uart_port *port = ptr;
712 irqreturn_t ret = IRQ_NONE; 712 irqreturn_t ret = IRQ_NONE;
713 713
714 ssr_status = sci_in(port, SCxSR); 714 ssr_status = sci_in(port, SCxSR);
715 scr_status = sci_in(port, SCSCR); 715 scr_status = sci_in(port, SCSCR);
716 err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
716 717
717 /* Tx Interrupt */ 718 /* Tx Interrupt */
718 if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE)) 719 if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE))
719 ret = sci_tx_interrupt(irq, ptr); 720 ret = sci_tx_interrupt(irq, ptr);
720 /* Rx Interrupt */ 721 /* Rx Interrupt */
721 if ((ssr_status & 0x0002) && (scr_status & SCI_CTRL_FLAGS_RIE)) 722 if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE))
722 ret = sci_rx_interrupt(irq, ptr); 723 ret = sci_rx_interrupt(irq, ptr);
723 /* Error Interrupt */ 724 /* Error Interrupt */
724 if ((ssr_status & 0x0080) && (scr_status & SCI_CTRL_FLAGS_REIE)) 725 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
725 ret = sci_er_interrupt(irq, ptr); 726 ret = sci_er_interrupt(irq, ptr);
726 /* Break Interrupt */ 727 /* Break Interrupt */
727 if ((ssr_status & 0x0010) && (scr_status & SCI_CTRL_FLAGS_REIE)) 728 if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
728 ret = sci_br_interrupt(irq, ptr); 729 ret = sci_br_interrupt(irq, ptr);
729 730
730 return ret; 731 return ret;
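
The sh-sci hunk replaces raw status masks (0x0020, 0x0002, ...) with the per-port SCxSR_*() helpers and computes the combined error-interrupt enable once instead of testing SCI_CTRL_FLAGS_REIE alone. The same shape with hypothetical bit names, just to show the pattern being applied:

/* Hypothetical register bits, not the SH-SCI ones. */
#define DEMO_STAT_ERR   (1 << 7)
#define DEMO_CTRL_RIE   (1 << 6)
#define DEMO_CTRL_REIE  (1 << 11)

static int demo_error_irq_pending(unsigned short status, unsigned short ctrl)
{
        /* compute the enable condition once, then test a named status bit */
        unsigned short err_enabled = ctrl & (DEMO_CTRL_RIE | DEMO_CTRL_REIE);

        return (status & DEMO_STAT_ERR) && err_enabled;
}
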
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c
index 0573f3b5175e..dac550e57c29 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/serial/vr41xx_siu.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for NEC VR4100 series Serial Interface Unit. 2 * Driver for NEC VR4100 series Serial Interface Unit.
3 * 3 *
4 * Copyright (C) 2004-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
5 * 5 *
6 * Based on drivers/serial/8250.c, by Russell King. 6 * Based on drivers/serial/8250.c, by Russell King.
7 * 7 *
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index eee4b6e0af2c..9b80ad36dbba 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -59,6 +59,8 @@
59 59
60/* per-register bitmasks: */ 60/* per-register bitmasks: */
61 61
62#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE (2 << 3)
63#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP (1 << 2)
62#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0) 64#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0)
63#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1) 65#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1)
64 66
@@ -90,6 +92,7 @@
90 92
91#define OMAP2_MCSPI_CHCTRL_EN (1 << 0) 93#define OMAP2_MCSPI_CHCTRL_EN (1 << 0)
92 94
95#define OMAP2_MCSPI_WAKEUPENABLE_WKEN (1 << 0)
93 96
94/* We have 2 DMA channels per CS, one for RX and one for TX */ 97/* We have 2 DMA channels per CS, one for RX and one for TX */
95struct omap2_mcspi_dma { 98struct omap2_mcspi_dma {
@@ -269,7 +272,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
269 272
270 if (rx != NULL) { 273 if (rx != NULL) {
271 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, 274 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
272 data_type, element_count, 1, 275 data_type, element_count - 1, 1,
273 OMAP_DMA_SYNC_ELEMENT, 276 OMAP_DMA_SYNC_ELEMENT,
274 mcspi_dma->dma_rx_sync_dev, 1); 277 mcspi_dma->dma_rx_sync_dev, 1);
275 278
@@ -300,6 +303,25 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
300 if (rx != NULL) { 303 if (rx != NULL) {
301 wait_for_completion(&mcspi_dma->dma_rx_completion); 304 wait_for_completion(&mcspi_dma->dma_rx_completion);
302 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); 305 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
306 omap2_mcspi_set_enable(spi, 0);
307 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
308 & OMAP2_MCSPI_CHSTAT_RXS)) {
309 u32 w;
310
311 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
312 if (word_len <= 8)
313 ((u8 *)xfer->rx_buf)[element_count - 1] = w;
314 else if (word_len <= 16)
315 ((u16 *)xfer->rx_buf)[element_count - 1] = w;
316 else /* word_len <= 32 */
317 ((u32 *)xfer->rx_buf)[element_count - 1] = w;
318 } else {
319 dev_err(&spi->dev, "DMA RX last word empty");
320 count -= (word_len <= 8) ? 1 :
321 (word_len <= 16) ? 2 :
322 /* word_len <= 32 */ 4;
323 }
324 omap2_mcspi_set_enable(spi, 1);
303 } 325 }
304 return count; 326 return count;
305} 327}
@@ -873,8 +895,12 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
873 } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); 895 } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
874 896
875 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, 897 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
876 /* (3 << 8) | (2 << 3) | */ 898 OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
877 OMAP2_MCSPI_SYSCONFIG_AUTOIDLE); 899 OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
900 OMAP2_MCSPI_SYSCONFIG_SMARTIDLE);
901
902 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
903 OMAP2_MCSPI_WAKEUPENABLE_WKEN);
878 904
879 omap2_mcspi_set_master_mode(master); 905 omap2_mcspi_set_master_mode(master);
880 906
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index aa90ddb37066..8980a5640bd9 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev)
514 /* the spi->mode bits understood by this driver: */ 514 /* the spi->mode bits understood by this driver: */
515 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 515 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
516 516
517 master->flags = SPI_MASTER_HALF_DUPLEX;
518
517 master->bus_num = 2; /* "official" */ 519 master->bus_num = 2; /* "official" */
518 master->num_chipselect = 4; 520 master->num_chipselect = 4;
519 master->setup = uwire_setup; 521 master->setup = uwire_setup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 2a5abc08e857..f1db395dd889 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work)
258 struct spi_bitbang *bitbang = 258 struct spi_bitbang *bitbang =
259 container_of(work, struct spi_bitbang, work); 259 container_of(work, struct spi_bitbang, work);
260 unsigned long flags; 260 unsigned long flags;
261 int do_setup = -1;
262 int (*setup_transfer)(struct spi_device *,
263 struct spi_transfer *);
264
265 setup_transfer = bitbang->setup_transfer;
261 266
262 spin_lock_irqsave(&bitbang->lock, flags); 267 spin_lock_irqsave(&bitbang->lock, flags);
263 bitbang->busy = 1; 268 bitbang->busy = 1;
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work)
269 unsigned tmp; 274 unsigned tmp;
270 unsigned cs_change; 275 unsigned cs_change;
271 int status; 276 int status;
272 int (*setup_transfer)(struct spi_device *,
273 struct spi_transfer *);
274 277
275 m = container_of(bitbang->queue.next, struct spi_message, 278 m = container_of(bitbang->queue.next, struct spi_message,
276 queue); 279 queue);
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work)
287 tmp = 0; 290 tmp = 0;
288 cs_change = 1; 291 cs_change = 1;
289 status = 0; 292 status = 0;
290 setup_transfer = NULL;
291 293
292 list_for_each_entry (t, &m->transfers, transfer_list) { 294 list_for_each_entry (t, &m->transfers, transfer_list) {
293 295
294 /* override or restore speed and wordsize */ 296 /* override speed or wordsize? */
295 if (t->speed_hz || t->bits_per_word) { 297 if (t->speed_hz || t->bits_per_word)
296 setup_transfer = bitbang->setup_transfer; 298 do_setup = 1;
299
300 /* init (-1) or override (1) transfer params */
301 if (do_setup != 0) {
297 if (!setup_transfer) { 302 if (!setup_transfer) {
298 status = -ENOPROTOOPT; 303 status = -ENOPROTOOPT;
299 break; 304 break;
300 } 305 }
301 }
302 if (setup_transfer) {
303 status = setup_transfer(spi, t); 306 status = setup_transfer(spi, t);
304 if (status < 0) 307 if (status < 0)
305 break; 308 break;
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work)
363 m->status = status; 366 m->status = status;
364 m->complete(m->context); 367 m->complete(m->context);
365 368
366 /* restore speed and wordsize */ 369 /* restore speed and wordsize if it was overridden */
367 if (setup_transfer) 370 if (do_setup == 1)
368 setup_transfer(spi, NULL); 371 setup_transfer(spi, NULL);
372 do_setup = 0;
369 373
370 /* normally deactivate chipselect ... unless no error and 374 /* normally deactivate chipselect ... unless no error and
371 * cs_change has hinted that the next message will probably 375 * cs_change has hinted that the next message will probably
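
The do_setup variable introduced above is a small tri-state, following the hunk's own comments: -1 means the device defaults still have to be applied, 1 means some transfer overrode speed or word size (so defaults must be restored after the message), and 0 means there is nothing left to do. A stripped-down model of that control flow using plain C stand-ins rather than the real SPI structures:

#include <stdio.h>

struct demo_xfer { int speed_hz; int bits_per_word; };

/* t == NULL means "restore the device defaults". */
static int demo_setup_transfer(const struct demo_xfer *t)
{
        if (t)
                printf("apply %d Hz / %d bpw\n", t->speed_hz, t->bits_per_word);
        else
                printf("restore defaults\n");
        return 0;
}

int main(void)
{
        struct demo_xfer msg[] = { {0, 0}, {500000, 16}, {0, 0} };
        int do_setup = -1;              /* -1 init, 1 overridden, 0 nothing to do */
        size_t i;

        for (i = 0; i < sizeof(msg) / sizeof(msg[0]); i++) {
                if (msg[i].speed_hz || msg[i].bits_per_word)
                        do_setup = 1;
                if (do_setup != 0)      /* first message, or an override */
                        demo_setup_transfer(&msg[i]);
        }
        if (do_setup == 1)              /* restore only if something was overridden */
                demo_setup_transfer(NULL);
        do_setup = 0;                   /* later messages re-run setup only on overrides */
        return 0;
}
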
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d869c4d3eb2..606e7a40a8da 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
58 58
59 59
60/* Bit masks for spi_device.mode management. Note that incorrect 60/* Bit masks for spi_device.mode management. Note that incorrect
61 * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other 61 * settings for some settings can cause *lots* of trouble for other
62 * devices on a shared bus: CS_HIGH, because this device will be 62 * devices on a shared bus:
63 * active when it shouldn't be; 3WIRE, because when active it won't
64 * behave as it should.
65 * 63 *
66 * REVISIT should changing those two modes be privileged? 64 * - CS_HIGH ... this device will be active when it shouldn't be
65 * - 3WIRE ... when active, it won't behave as it should
66 * - NO_CS ... there will be no explicit message boundaries; this
67 * is completely incompatible with the shared bus model
68 * - READY ... transfers may proceed when they shouldn't.
69 *
70 * REVISIT should changing those flags be privileged?
67 */ 71 */
68#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ 72#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
69 | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP) 73 | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
74 | SPI_NO_CS | SPI_READY)
70 75
71struct spidev_data { 76struct spidev_data {
72 dev_t devt; 77 dev_t devt;
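
Widening SPI_MODE_MASK is what lets the spidev ioctl path accept SPI_NO_CS and SPI_READY while still rejecting unknown bits. The usual validation against such a mask looks roughly like this (hypothetical function; SPI_MODE_MASK means the spidev-local macro from the hunk):

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical validator; SPI_MODE_MASK is the local macro defined above. */
static int demo_check_requested_mode(u32 requested)
{
        if (requested & ~SPI_MODE_MASK)
                return -EINVAL;         /* unknown mode bit requested */
        return 0;
}
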
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 3fd3e3b412b6..3c6feed46f6e 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -49,29 +49,54 @@ static const u32 ipsflag_irq_shift[] = {
49 49
50static inline u32 ssb_irqflag(struct ssb_device *dev) 50static inline u32 ssb_irqflag(struct ssb_device *dev)
51{ 51{
52 return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG; 52 u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG);
53 if (tpsflag)
54 return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG;
55 else
56 /* not irq supported */
57 return 0x3f;
58}
59
60static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag)
61{
62 struct ssb_bus *bus = rdev->bus;
63 int i;
64 for (i = 0; i < bus->nr_devices; i++) {
65 struct ssb_device *dev;
66 dev = &(bus->devices[i]);
67 if (ssb_irqflag(dev) == irqflag)
68 return dev;
69 }
70 return NULL;
53} 71}
54 72
55/* Get the MIPS IRQ assignment for a specified device. 73/* Get the MIPS IRQ assignment for a specified device.
56 * If unassigned, 0 is returned. 74 * If unassigned, 0 is returned.
75 * If disabled, 5 is returned.
76 * If not supported, 6 is returned.
57 */ 77 */
58unsigned int ssb_mips_irq(struct ssb_device *dev) 78unsigned int ssb_mips_irq(struct ssb_device *dev)
59{ 79{
60 struct ssb_bus *bus = dev->bus; 80 struct ssb_bus *bus = dev->bus;
81 struct ssb_device *mdev = bus->mipscore.dev;
61 u32 irqflag; 82 u32 irqflag;
62 u32 ipsflag; 83 u32 ipsflag;
63 u32 tmp; 84 u32 tmp;
64 unsigned int irq; 85 unsigned int irq;
65 86
66 irqflag = ssb_irqflag(dev); 87 irqflag = ssb_irqflag(dev);
88 if (irqflag == 0x3f)
89 return 6;
67 ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG); 90 ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG);
68 for (irq = 1; irq <= 4; irq++) { 91 for (irq = 1; irq <= 4; irq++) {
69 tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]); 92 tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]);
70 if (tmp == irqflag) 93 if (tmp == irqflag)
71 break; 94 break;
72 } 95 }
73 if (irq == 5) 96 if (irq == 5) {
74 irq = 0; 97 if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
98 irq = 0;
99 }
75 100
76 return irq; 101 return irq;
77} 102}
@@ -97,25 +122,56 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
97 struct ssb_device *mdev = bus->mipscore.dev; 122 struct ssb_device *mdev = bus->mipscore.dev;
98 u32 irqflag = ssb_irqflag(dev); 123 u32 irqflag = ssb_irqflag(dev);
99 124
125 BUG_ON(oldirq == 6);
126
100 dev->irq = irq + 2; 127 dev->irq = irq + 2;
101 128
102 ssb_dprintk(KERN_INFO PFX
103 "set_irq: core 0x%04x, irq %d => %d\n",
104 dev->id.coreid, oldirq, irq);
105 /* clear the old irq */ 129 /* clear the old irq */
106 if (oldirq == 0) 130 if (oldirq == 0)
107 ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))); 131 ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)));
108 else 132 else if (oldirq != 5)
109 clear_irq(bus, oldirq); 133 clear_irq(bus, oldirq);
110 134
111 /* assign the new one */ 135 /* assign the new one */
112 if (irq == 0) { 136 if (irq == 0) {
113 ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC))); 137 ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC)));
114 } else { 138 } else {
139 u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
140 if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) {
141 u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq];
142 struct ssb_device *olddev = find_device(dev, oldipsflag);
143 if (olddev)
144 set_irq(olddev, 0);
145 }
115 irqflag <<= ipsflag_irq_shift[irq]; 146 irqflag <<= ipsflag_irq_shift[irq];
116 irqflag |= (ssb_read32(mdev, SSB_IPSFLAG) & ~ipsflag_irq_mask[irq]); 147 irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
117 ssb_write32(mdev, SSB_IPSFLAG, irqflag); 148 ssb_write32(mdev, SSB_IPSFLAG, irqflag);
118 } 149 }
150 ssb_dprintk(KERN_INFO PFX
151 "set_irq: core 0x%04x, irq %d => %d\n",
152 dev->id.coreid, oldirq+2, irq+2);
153}
154
155static void print_irq(struct ssb_device *dev, unsigned int irq)
156{
157 int i;
158 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
159 ssb_dprintk(KERN_INFO PFX
160 "core 0x%04x, irq :", dev->id.coreid);
161 for (i = 0; i <= 6; i++) {
162 ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" ");
163 }
164 ssb_dprintk("\n");
165}
166
167static void dump_irq(struct ssb_bus *bus)
168{
169 int i;
170 for (i = 0; i < bus->nr_devices; i++) {
171 struct ssb_device *dev;
172 dev = &(bus->devices[i]);
173 print_irq(dev, ssb_mips_irq(dev));
174 }
119} 175}
120 176
121static void ssb_mips_serial_init(struct ssb_mipscore *mcore) 177static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
@@ -197,16 +253,23 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
197 253
198 /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */ 254 /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */
199 for (irq = 2, i = 0; i < bus->nr_devices; i++) { 255 for (irq = 2, i = 0; i < bus->nr_devices; i++) {
256 int mips_irq;
200 dev = &(bus->devices[i]); 257 dev = &(bus->devices[i]);
201 dev->irq = ssb_mips_irq(dev) + 2; 258 mips_irq = ssb_mips_irq(dev);
259 if (mips_irq > 4)
260 dev->irq = 0;
261 else
262 dev->irq = mips_irq + 2;
263 if (dev->irq > 5)
264 continue;
202 switch (dev->id.coreid) { 265 switch (dev->id.coreid) {
203 case SSB_DEV_USB11_HOST: 266 case SSB_DEV_USB11_HOST:
204 /* shouldn't need a separate irq line for non-4710, most of them have a proper 267 /* shouldn't need a separate irq line for non-4710, most of them have a proper
205 * external usb controller on the pci */ 268 * external usb controller on the pci */
206 if ((bus->chip_id == 0x4710) && (irq <= 4)) { 269 if ((bus->chip_id == 0x4710) && (irq <= 4)) {
207 set_irq(dev, irq++); 270 set_irq(dev, irq++);
208 break;
209 } 271 }
272 break;
210 /* fallthrough */ 273 /* fallthrough */
211 case SSB_DEV_PCI: 274 case SSB_DEV_PCI:
212 case SSB_DEV_ETHERNET: 275 case SSB_DEV_ETHERNET:
@@ -220,6 +283,8 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
220 } 283 }
221 } 284 }
222 } 285 }
286 ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
287 dump_irq(bus);
223 288
224 ssb_mips_serial_init(mcore); 289 ssb_mips_serial_init(mcore);
225 ssb_mips_flash_detect(mcore); 290 ssb_mips_flash_detect(mcore);
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index fbfadbac67e8..100e7a5c5ea1 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -583,7 +583,7 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
583 ssb_printk("."); 583 ssb_printk(".");
584 err = ssb_pcmcia_sprom_write(bus, i, sprom[i]); 584 err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
585 if (err) { 585 if (err) {
586 ssb_printk("\n" KERN_NOTICE PFX 586 ssb_printk(KERN_NOTICE PFX
587 "Failed to write to SPROM.\n"); 587 "Failed to write to SPROM.\n");
588 failed = 1; 588 failed = 1;
589 break; 589 break;
@@ -591,7 +591,7 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
591 } 591 }
592 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS); 592 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
593 if (err) { 593 if (err) {
594 ssb_printk("\n" KERN_NOTICE PFX 594 ssb_printk(KERN_NOTICE PFX
595 "Could not disable SPROM write access.\n"); 595 "Could not disable SPROM write access.\n");
596 failed = 1; 596 failed = 1;
597 } 597 }
@@ -678,7 +678,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
678 sprom->board_rev = tuple.TupleData[1]; 678 sprom->board_rev = tuple.TupleData[1];
679 break; 679 break;
680 case SSB_PCMCIA_CIS_PA: 680 case SSB_PCMCIA_CIS_PA:
681 GOTO_ERROR_ON(tuple.TupleDataLen != 9, 681 GOTO_ERROR_ON((tuple.TupleDataLen != 9) &&
682 (tuple.TupleDataLen != 10),
682 "pa tpl size"); 683 "pa tpl size");
683 sprom->pa0b0 = tuple.TupleData[1] | 684 sprom->pa0b0 = tuple.TupleData[1] |
684 ((u16)tuple.TupleData[2] << 8); 685 ((u16)tuple.TupleData[2] << 8);
@@ -718,7 +719,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
718 sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1]; 719 sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1];
719 break; 720 break;
720 case SSB_PCMCIA_CIS_BFLAGS: 721 case SSB_PCMCIA_CIS_BFLAGS:
721 GOTO_ERROR_ON(tuple.TupleDataLen != 3, 722 GOTO_ERROR_ON((tuple.TupleDataLen != 3) &&
723 (tuple.TupleDataLen != 5),
722 "bfl tpl size"); 724 "bfl tpl size");
723 sprom->boardflags_lo = tuple.TupleData[1] | 725 sprom->boardflags_lo = tuple.TupleData[1] |
724 ((u16)tuple.TupleData[2] << 8); 726 ((u16)tuple.TupleData[2] << 8);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 348bf61a8fec..975ecddbce30 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -103,8 +103,6 @@ source "drivers/staging/pohmelfs/Kconfig"
103 103
104source "drivers/staging/stlc45xx/Kconfig" 104source "drivers/staging/stlc45xx/Kconfig"
105 105
106source "drivers/staging/uc2322/Kconfig"
107
108source "drivers/staging/b3dfg/Kconfig" 106source "drivers/staging/b3dfg/Kconfig"
109 107
110source "drivers/staging/phison/Kconfig" 108source "drivers/staging/phison/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 8d61d7b4debf..2241ae1b21ee 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_ANDROID) += android/
34obj-$(CONFIG_DST) += dst/ 34obj-$(CONFIG_DST) += dst/
35obj-$(CONFIG_POHMELFS) += pohmelfs/ 35obj-$(CONFIG_POHMELFS) += pohmelfs/
36obj-$(CONFIG_STLC45XX) += stlc45xx/ 36obj-$(CONFIG_STLC45XX) += stlc45xx/
37obj-$(CONFIG_USB_SERIAL_ATEN2011) += uc2322/
38obj-$(CONFIG_B3DFG) += b3dfg/ 37obj-$(CONFIG_B3DFG) += b3dfg/
39obj-$(CONFIG_IDE_PHISON) += phison/ 38obj-$(CONFIG_IDE_PHISON) += phison/
40obj-$(CONFIG_PLAN9AUTH) += p9auth/ 39obj-$(CONFIG_PLAN9AUTH) += p9auth/
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe72240f5a9e..f934393f3959 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -96,19 +96,21 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
96 96
97 read_lock(&tasklist_lock); 97 read_lock(&tasklist_lock);
98 for_each_process(p) { 98 for_each_process(p) {
99 struct mm_struct *mm;
99 int oom_adj; 100 int oom_adj;
100 101
101 task_lock(p); 102 task_lock(p);
102 if (!p->mm) { 103 mm = p->mm;
104 if (!mm) {
103 task_unlock(p); 105 task_unlock(p);
104 continue; 106 continue;
105 } 107 }
106 oom_adj = p->oomkilladj; 108 oom_adj = mm->oom_adj;
107 if (oom_adj < min_adj) { 109 if (oom_adj < min_adj) {
108 task_unlock(p); 110 task_unlock(p);
109 continue; 111 continue;
110 } 112 }
111 tasksize = get_mm_rss(p->mm); 113 tasksize = get_mm_rss(mm);
112 task_unlock(p); 114 task_unlock(p);
113 if (tasksize <= 0) 115 if (tasksize <= 0)
114 continue; 116 continue;
diff --git a/drivers/staging/b3dfg/Kconfig b/drivers/staging/b3dfg/Kconfig
index 524231047de5..9e6573cf97d3 100644
--- a/drivers/staging/b3dfg/Kconfig
+++ b/drivers/staging/b3dfg/Kconfig
@@ -1,5 +1,6 @@
1config B3DFG 1config B3DFG
2 tristate "Brontes 3d Frame Framegrabber" 2 tristate "Brontes 3d Frame Framegrabber"
3 depends on PCI
3 default n 4 default n
4 ---help--- 5 ---help---
5 This driver provides support for the Brontes 3d Framegrabber 6 This driver provides support for the Brontes 3d Framegrabber
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index baf83c6a9412..e3c3adc282e2 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -45,6 +45,8 @@ Devices: [JR3] PCI force sensor board (jr3_pci)
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/ctype.h> 46#include <linux/ctype.h>
47#include <linux/firmware.h> 47#include <linux/firmware.h>
48#include <linux/jiffies.h>
49#include <linux/timer.h>
48#include "comedi_pci.h" 50#include "comedi_pci.h"
49#include "jr3_pci.h" 51#include "jr3_pci.h"
50 52
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 92121cf8c45c..5d9bab352c1d 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -111,9 +111,13 @@ static const struct s626_board s626_boards[] = {
111#define PCI_VENDOR_ID_S626 0x1131 111#define PCI_VENDOR_ID_S626 0x1131
112#define PCI_DEVICE_ID_S626 0x7146 112#define PCI_DEVICE_ID_S626 0x7146
113 113
114/*
115 * For devices with vendor:device id == 0x1131:0x7146 you must specify
116 * also subvendor:subdevice ids, because otherwise it will conflict with
117 * Philips SAA7146 media/dvb based cards.
118 */
114static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = { 119static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = {
115 {PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 120 {PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626, 0x6000, 0x0272, 0, 0, 0},
116 0},
117 {0} 121 {0}
118}; 122};
119 123
@@ -499,25 +503,26 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
499 resource_size_t resourceStart; 503 resource_size_t resourceStart;
500 dma_addr_t appdma; 504 dma_addr_t appdma;
501 struct comedi_subdevice *s; 505 struct comedi_subdevice *s;
502 struct pci_dev *pdev; 506 const struct pci_device_id *ids;
507 struct pci_dev *pdev = NULL;
503 508
504 if (alloc_private(dev, sizeof(struct s626_private)) < 0) 509 if (alloc_private(dev, sizeof(struct s626_private)) < 0)
505 return -ENOMEM; 510 return -ENOMEM;
506 511
507 for (pdev = pci_get_device(PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626, 512 for (i = 0; i < (ARRAY_SIZE(s626_pci_table) - 1) && !pdev; i++) {
508 NULL); pdev != NULL; 513 ids = &s626_pci_table[i];
509 pdev = pci_get_device(PCI_VENDOR_ID_S626, 514 do {
510 PCI_DEVICE_ID_S626, pdev)) { 515 pdev = pci_get_subsys(ids->vendor, ids->device, ids->subvendor,
511 if (it->options[0] || it->options[1]) { 516 ids->subdevice, pdev);
512 if (pdev->bus->number == it->options[0] && 517
513 PCI_SLOT(pdev->devfn) == it->options[1]) { 518 if ((it->options[0] || it->options[1]) && pdev) {
514 /* matches requested bus/slot */ 519 /* matches requested bus/slot */
520 if (pdev->bus->number == it->options[0] &&
521 PCI_SLOT(pdev->devfn) == it->options[1])
522 break;
523 } else
515 break; 524 break;
516 } 525 } while (1);
517 } else {
518 /* no bus/slot specified */
519 break;
520 }
521 } 526 }
522 devpriv->pdev = pdev; 527 devpriv->pdev = pdev;
523 528
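
The s626 attach path now walks its pci_device_id table with pci_get_subsys(), so only the board with the expected subvendor:subdevice (0x6000:0x0272) is claimed instead of every SAA7146-based card sharing 0x1131:0x7146. The core of that lookup, reduced to a hypothetical helper (the bus/slot filtering from the hunk is omitted):

#include <linux/pci.h>

/* Walk a table (excluding its all-zero sentinel) and return the first
 * matching device; the caller owns the reference and must pci_dev_put(). */
static struct pci_dev *demo_find_board(const struct pci_device_id *table,
                                       size_t nents)
{
        struct pci_dev *pdev = NULL;
        size_t i;

        for (i = 0; i < nents && !pdev; i++)
                pdev = pci_get_subsys(table[i].vendor, table[i].device,
                                      table[i].subvendor, table[i].subdevice,
                                      NULL);
        return pdev;
}
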
diff --git a/drivers/staging/go7007/s2250-loader.c b/drivers/staging/go7007/s2250-loader.c
index a5e4acab089e..bb22347af60e 100644
--- a/drivers/staging/go7007/s2250-loader.c
+++ b/drivers/staging/go7007/s2250-loader.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/smp_lock.h>
20#include <linux/usb.h> 21#include <linux/usb.h>
21#include <dvb-usb.h> 22#include <dvb-usb.h>
22 23
diff --git a/drivers/staging/heci/Kconfig b/drivers/staging/heci/Kconfig
index ae8d588d3a27..c7206f8bcd93 100644
--- a/drivers/staging/heci/Kconfig
+++ b/drivers/staging/heci/Kconfig
@@ -1,5 +1,6 @@
1config HECI 1config HECI
2 tristate "Intel Management Engine Interface (MEI) Support" 2 tristate "Intel Management Engine Interface (MEI) Support"
3 depends on PCI
3 ---help--- 4 ---help---
4 The Intel Management Engine Interface (Intel MEI) driver allows 5 The Intel Management Engine Interface (Intel MEI) driver allows
5 applications to access the Active Management Technology 6 applications to access the Active Management Technology
diff --git a/drivers/staging/meilhaus/TODO b/drivers/staging/meilhaus/TODO
index 6ec25203089c..d6ce39823de6 100644
--- a/drivers/staging/meilhaus/TODO
+++ b/drivers/staging/meilhaus/TODO
@@ -7,4 +7,4 @@ TODO:
7 - possible comedi merge 7 - possible comedi merge
8 8
9Please send cleanup patches to Greg Kroah-Hartman <greg@kroah.com> 9Please send cleanup patches to Greg Kroah-Hartman <greg@kroah.com>
10and CC: David Kiliani <mail@davidkiliani.de> 10and CC: David Kiliani <mail@davidkiliani.de> and Meilhaus Support <support@meilhaus.de>
diff --git a/drivers/staging/rspiusb/rspiusb.c b/drivers/staging/rspiusb/rspiusb.c
index 1cdfe69585ea..04e2f92c0f62 100644
--- a/drivers/staging/rspiusb/rspiusb.c
+++ b/drivers/staging/rspiusb/rspiusb.c
@@ -444,8 +444,7 @@ static void piusb_write_bulk_callback(struct urb *urb)
444 __func__, status); 444 __func__, status);
445 445
446 pdx->pendingWrite = 0; 446 pdx->pendingWrite = 0;
447 usb_buffer_free(urb->dev, urb->transfer_buffer_length, 447 kfree(urb->transfer_buffer);
448 urb->transfer_buffer, urb->transfer_dma);
449} 448}
450 449
451int piusb_output(struct ioctl_struct *io, unsigned char *uBuf, int len, 450int piusb_output(struct ioctl_struct *io, unsigned char *uBuf, int len,
@@ -457,9 +456,7 @@ int piusb_output(struct ioctl_struct *io, unsigned char *uBuf, int len,
457 456
458 urb = usb_alloc_urb(0, GFP_KERNEL); 457 urb = usb_alloc_urb(0, GFP_KERNEL);
459 if (urb != NULL) { 458 if (urb != NULL) {
460 kbuf = 459 kbuf = kmalloc(len, GFP_KERNEL);
461 usb_buffer_alloc(pdx->udev, len, GFP_KERNEL,
462 &urb->transfer_dma);
463 if (!kbuf) { 460 if (!kbuf) {
464 dev_err(&pdx->udev->dev, "buffer_alloc failed\n"); 461 dev_err(&pdx->udev->dev, "buffer_alloc failed\n");
465 return -ENOMEM; 462 return -ENOMEM;
@@ -470,7 +467,6 @@ int piusb_output(struct ioctl_struct *io, unsigned char *uBuf, int len,
470 } 467 }
471 usb_fill_bulk_urb(urb, pdx->udev, pdx->hEP[io->endpoint], kbuf, 468 usb_fill_bulk_urb(urb, pdx->udev, pdx->hEP[io->endpoint], kbuf,
472 len, piusb_write_bulk_callback, pdx); 469 len, piusb_write_bulk_callback, pdx);
473 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
474 err = usb_submit_urb(urb, GFP_KERNEL); 470 err = usb_submit_urb(urb, GFP_KERNEL);
475 if (err) { 471 if (err) {
476 dev_err(&pdx->udev->dev, 472 dev_err(&pdx->udev->dev,
@@ -641,7 +637,7 @@ static int MapUserBuffer(struct ioctl_struct *io, struct device_extension *pdx)
641 numPagesRequired = 637 numPagesRequired =
642 ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT; 638 ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
643 dbg("Number of pages needed = %d", numPagesRequired); 639 dbg("Number of pages needed = %d", numPagesRequired);
644 maplist_p = vmalloc(numPagesRequired * sizeof(struct page)); 640 maplist_p = vmalloc(numPagesRequired * sizeof(struct page *));
645 if (!maplist_p) { 641 if (!maplist_p) {
646 dbg("Can't Allocate Memory for maplist_p"); 642 dbg("Can't Allocate Memory for maplist_p");
647 return -ENOMEM; 643 return -ENOMEM;
@@ -712,9 +708,7 @@ static int MapUserBuffer(struct ioctl_struct *io, struct device_extension *pdx)
712 usb_fill_bulk_urb(pdx->PixelUrb[frameInfo][i], 708 usb_fill_bulk_urb(pdx->PixelUrb[frameInfo][i],
713 pdx->udev, 709 pdx->udev,
714 epAddr, 710 epAddr,
715 (dma_addr_t *) sg_dma_address(&pdx-> 711 NULL, // non-DMA HC? buy a better hardware
716 sgl[frameInfo]
717 [i]),
718 sg_dma_len(&pdx->sgl[frameInfo][i]), 712 sg_dma_len(&pdx->sgl[frameInfo][i]),
719 piusb_readPIXEL_callback, (void *)pdx); 713 piusb_readPIXEL_callback, (void *)pdx);
720 pdx->PixelUrb[frameInfo][i]->transfer_dma = 714 pdx->PixelUrb[frameInfo][i]->transfer_dma =
@@ -722,6 +716,8 @@ static int MapUserBuffer(struct ioctl_struct *io, struct device_extension *pdx)
722 pdx->PixelUrb[frameInfo][i]->transfer_flags = 716 pdx->PixelUrb[frameInfo][i]->transfer_flags =
723 URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT; 717 URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT;
724 } 718 }
719 if (i == 0)
720 return -EINVAL;
725 /* only interrupt when last URB completes */ 721 /* only interrupt when last URB completes */
726 pdx->PixelUrb[frameInfo][--i]->transfer_flags &= ~URB_NO_INTERRUPT; 722 pdx->PixelUrb[frameInfo][--i]->transfer_flags &= ~URB_NO_INTERRUPT;
727 pdx->pendedPixelUrbs[frameInfo] = 723 pdx->pendedPixelUrbs[frameInfo] =
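
The rspiusb hunks drop usb_buffer_alloc()/URB_NO_TRANSFER_DMA_MAP in favour of an ordinary kmalloc() buffer that the USB core maps for DMA itself, with the completion callback kfree()ing urb->transfer_buffer. A reduced sketch of that submission style (hypothetical helper name; error handling trimmed):

#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical bulk-out helper: plain kmalloc() memory, no transfer_dma. */
static int demo_fill_bulk_out(struct urb *urb, struct usb_device *udev,
                              unsigned int pipe, const void *data, int len,
                              usb_complete_t done, void *ctx)
{
        void *buf = kmemdup(data, len, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, ctx);
        /* No URB_NO_TRANSFER_DMA_MAP: the core handles the DMA mapping.
         * The completion callback is expected to kfree(urb->transfer_buffer). */
        return 0;
}
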
diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
index 85175c182432..25b53ac3f820 100644
--- a/drivers/staging/rt2860/rt_linux.h
+++ b/drivers/staging/rt2860/rt_linux.h
@@ -43,9 +43,6 @@
43#include "rtmp_type.h" 43#include "rtmp_type.h"
44#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#if !defined(RT2860) && !defined(RT30xx)
47#include <linux/kthread.h>
48#endif
49 46
50#include <linux/spinlock.h> 47#include <linux/spinlock.h>
51#include <linux/init.h> 48#include <linux/init.h>
@@ -166,9 +163,7 @@ typedef int (*HARD_START_XMIT_FUNC)(struct sk_buff *skb, struct net_device *net_
166 163
167#ifndef RT30xx 164#ifndef RT30xx
168typedef struct pid * THREAD_PID; 165typedef struct pid * THREAD_PID;
169#ifdef RT2860
170#define THREAD_PID_INIT_VALUE NULL 166#define THREAD_PID_INIT_VALUE NULL
171#endif
172#define GET_PID(_v) find_get_pid(_v) 167#define GET_PID(_v) find_get_pid(_v)
173#define GET_PID_NUMBER(_v) pid_nr(_v) 168#define GET_PID_NUMBER(_v) pid_nr(_v)
174#define CHECK_PID_LEGALITY(_pid) if (pid_nr(_pid) >= 0) 169#define CHECK_PID_LEGALITY(_pid) if (pid_nr(_pid) >= 0)
@@ -188,12 +183,12 @@ struct os_cookie {
188 dma_addr_t pAd_pa; 183 dma_addr_t pAd_pa;
189#endif 184#endif
190#ifdef RT2870 185#ifdef RT2870
191 struct usb_device *pUsb_Dev; 186 struct usb_device *pUsb_Dev;
192 187
193#ifndef RT30xx 188#ifndef RT30xx
194 struct task_struct *MLMEThr_task; 189 THREAD_PID MLMEThr_pid;
195 struct task_struct *RTUSBCmdThr_task; 190 THREAD_PID RTUSBCmdThr_pid;
196 struct task_struct *TimerQThr_task; 191 THREAD_PID TimerQThr_pid;
197#endif 192#endif
198#ifdef RT30xx 193#ifdef RT30xx
199 struct pid *MLMEThr_pid; 194 struct pid *MLMEThr_pid;
diff --git a/drivers/staging/rt2870/2870_main_dev.c b/drivers/staging/rt2870/2870_main_dev.c
index dd01c64fbf61..a4e8696ca39c 100644
--- a/drivers/staging/rt2870/2870_main_dev.c
+++ b/drivers/staging/rt2870/2870_main_dev.c
@@ -235,7 +235,7 @@ INT MlmeThread(
235 DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); 235 DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__));
236 236
237#ifndef RT30xx 237#ifndef RT30xx
238 pObj->MLMEThr_task = NULL; 238 pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE;
239#endif 239#endif
240#ifdef RT30xx 240#ifdef RT30xx
241 pObj->MLMEThr_pid = NULL; 241 pObj->MLMEThr_pid = NULL;
@@ -348,7 +348,7 @@ INT RTUSBCmdThread(
348 DBGPRINT(RT_DEBUG_TRACE,( "<---RTUSBCmdThread\n")); 348 DBGPRINT(RT_DEBUG_TRACE,( "<---RTUSBCmdThread\n"));
349 349
350#ifndef RT30xx 350#ifndef RT30xx
351 pObj->RTUSBCmdThr_task = NULL; 351 pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE;
352#endif 352#endif
353#ifdef RT30xx 353#ifdef RT30xx
354 pObj->RTUSBCmdThr_pid = NULL; 354 pObj->RTUSBCmdThr_pid = NULL;
@@ -447,7 +447,7 @@ INT TimerQThread(
447 DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); 447 DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__));
448 448
449#ifndef RT30xx 449#ifndef RT30xx
450 pObj->TimerQThr_task = NULL; 450 pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE;
451#endif 451#endif
452#ifdef RT30xx 452#ifdef RT30xx
453 pObj->TimerQThr_pid = NULL; 453 pObj->TimerQThr_pid = NULL;
@@ -883,46 +883,69 @@ VOID RT28xxThreadTerminate(
883 883
884 // Terminate Threads 884 // Terminate Threads
885#ifndef RT30xx 885#ifndef RT30xx
886 BUG_ON(pObj->TimerQThr_task == NULL); 886 CHECK_PID_LEGALITY(pObj->TimerQThr_pid)
887 CHECK_PID_LEGALITY(task_pid(pObj->TimerQThr_task))
888 { 887 {
889 POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie; 888 POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie;
890 889
891 printk(KERN_DEBUG "Terminate the TimerQThr pid=%d!\n", 890 printk("Terminate the TimerQThr_pid=%d!\n", GET_PID_NUMBER(pObj->TimerQThr_pid));
892 pid_nr(task_pid(pObj->TimerQThr_task)));
893 mb(); 891 mb();
894 pAd->TimerFunc_kill = 1; 892 pAd->TimerFunc_kill = 1;
895 mb(); 893 mb();
896 kthread_stop(pObj->TimerQThr_task); 894 ret = KILL_THREAD_PID(pObj->TimerQThr_pid, SIGTERM, 1);
897 pObj->TimerQThr_task = NULL; 895 if (ret)
896 {
897 printk(KERN_WARNING "%s: unable to stop TimerQThread, pid=%d, ret=%d!\n",
898 pAd->net_dev->name, GET_PID_NUMBER(pObj->TimerQThr_pid), ret);
899 }
900 else
901 {
902 wait_for_completion(&pAd->TimerQComplete);
903 pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE;
904 }
898 } 905 }
899 906
900 BUG_ON(pObj->MLMEThr_task == NULL); 907 CHECK_PID_LEGALITY(pObj->MLMEThr_pid)
901 CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task))
902 { 908 {
903 printk(KERN_DEBUG "Terminate the MLMEThr pid=%d!\n", 909 printk("Terminate the MLMEThr_pid=%d!\n", GET_PID_NUMBER(pObj->MLMEThr_pid));
904 pid_nr(task_pid(pObj->MLMEThr_task)));
905 mb(); 910 mb();
906 pAd->mlme_kill = 1; 911 pAd->mlme_kill = 1;
907 //RT28XX_MLME_HANDLER(pAd); 912 //RT28XX_MLME_HANDLER(pAd);
908 mb(); 913 mb();
909 kthread_stop(pObj->MLMEThr_task); 914 ret = KILL_THREAD_PID(pObj->MLMEThr_pid, SIGTERM, 1);
910 pObj->MLMEThr_task = NULL; 915 if (ret)
916 {
917 printk (KERN_WARNING "%s: unable to Mlme thread, pid=%d, ret=%d!\n",
918 pAd->net_dev->name, GET_PID_NUMBER(pObj->MLMEThr_pid), ret);
919 }
920 else
921 {
922 //wait_for_completion (&pAd->notify);
923 wait_for_completion (&pAd->mlmeComplete);
924 pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE;
925 }
911 } 926 }
912 927
913 BUG_ON(pObj->RTUSBCmdThr_task == NULL); 928 CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid)
914 CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task))
915 { 929 {
916 printk(KERN_DEBUG "Terminate the RTUSBCmdThr pid=%d!\n", 930 printk("Terminate the RTUSBCmdThr_pid=%d!\n", GET_PID_NUMBER(pObj->RTUSBCmdThr_pid));
917 pid_nr(task_pid(pObj->RTUSBCmdThr_task)));
918 mb(); 931 mb();
919 NdisAcquireSpinLock(&pAd->CmdQLock); 932 NdisAcquireSpinLock(&pAd->CmdQLock);
920 pAd->CmdQ.CmdQState = RT2870_THREAD_STOPED; 933 pAd->CmdQ.CmdQState = RT2870_THREAD_STOPED;
921 NdisReleaseSpinLock(&pAd->CmdQLock); 934 NdisReleaseSpinLock(&pAd->CmdQLock);
922 mb(); 935 mb();
923 //RTUSBCMDUp(pAd); 936 //RTUSBCMDUp(pAd);
924 kthread_stop(pObj->RTUSBCmdThr_task); 937 ret = KILL_THREAD_PID(pObj->RTUSBCmdThr_pid, SIGTERM, 1);
925 pObj->RTUSBCmdThr_task = NULL; 938 if (ret)
939 {
940 printk(KERN_WARNING "%s: unable to RTUSBCmd thread, pid=%d, ret=%d!\n",
941 pAd->net_dev->name, GET_PID_NUMBER(pObj->RTUSBCmdThr_pid), ret);
942 }
943 else
944 {
945 //wait_for_completion (&pAd->notify);
946 wait_for_completion (&pAd->CmdQComplete);
947 pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE;
948 }
926 } 949 }
927#endif 950#endif
928#ifdef RT30xx 951#ifdef RT30xx
@@ -1045,7 +1068,7 @@ BOOLEAN RT28XXChipsetCheck(
1045 dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct) 1068 dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct)
1046 { 1069 {
1047#ifndef RT30xx 1070#ifndef RT30xx
1048 printk(KERN_DEBUG "rt2870: idVendor = 0x%x, idProduct = 0x%x\n", 1071 printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
1049#endif 1072#endif
1050#ifdef RT30xx 1073#ifdef RT30xx
1051 printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n", 1074 printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
diff --git a/drivers/staging/rt2870/common/2870_rtmp_init.c b/drivers/staging/rt2870/common/2870_rtmp_init.c
index 0f4c8af97e47..80909e9ab5ae 100644
--- a/drivers/staging/rt2870/common/2870_rtmp_init.c
+++ b/drivers/staging/rt2870/common/2870_rtmp_init.c
@@ -700,8 +700,8 @@ NDIS_STATUS AdapterBlockAllocateMemory(
700 usb_dev = pObj->pUsb_Dev; 700 usb_dev = pObj->pUsb_Dev;
701 701
702#ifndef RT30xx 702#ifndef RT30xx
703 pObj->MLMEThr_task = NULL; 703 pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE;
704 pObj->RTUSBCmdThr_task = NULL; 704 pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE;
705#endif 705#endif
706#ifdef RT30xx 706#ifdef RT30xx
707 pObj->MLMEThr_pid = NULL; 707 pObj->MLMEThr_pid = NULL;
@@ -743,7 +743,7 @@ NDIS_STATUS CreateThreads(
743 PRTMP_ADAPTER pAd = net_dev->ml_priv; 743 PRTMP_ADAPTER pAd = net_dev->ml_priv;
744 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; 744 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie;
745#ifndef RT30xx 745#ifndef RT30xx
746 struct task_struct *tsk; 746 pid_t pid_number = -1;
747#endif 747#endif
748#ifdef RT30xx 748#ifdef RT30xx
749 pid_t pid_number; 749 pid_t pid_number;
@@ -762,10 +762,10 @@ NDIS_STATUS CreateThreads(
762 762
763 // Creat MLME Thread 763 // Creat MLME Thread
764#ifndef RT30xx 764#ifndef RT30xx
765 pObj->MLMEThr_task = NULL; 765 pObj->MLMEThr_pid= THREAD_PID_INIT_VALUE;
766 tsk = kthread_run(MlmeThread, pAd, "%s", pAd->net_dev->name); 766 pid_number = kernel_thread(MlmeThread, pAd, CLONE_VM);
767 767 if (pid_number < 0)
768 if (IS_ERR(tsk)) { 768 {
769#endif 769#endif
770#ifdef RT30xx 770#ifdef RT30xx
771 pObj->MLMEThr_pid = NULL; 771 pObj->MLMEThr_pid = NULL;
@@ -778,7 +778,7 @@ NDIS_STATUS CreateThreads(
778 } 778 }
779 779
780#ifndef RT30xx 780#ifndef RT30xx
781 pObj->MLMEThr_task = tsk; 781 pObj->MLMEThr_pid = GET_PID(pid_number);
782#endif 782#endif
783#ifdef RT30xx 783#ifdef RT30xx
784 pObj->MLMEThr_pid = find_get_pid(pid_number); 784 pObj->MLMEThr_pid = find_get_pid(pid_number);
@@ -788,10 +788,9 @@ NDIS_STATUS CreateThreads(
788 788
789 // Creat Command Thread 789 // Creat Command Thread
790#ifndef RT30xx 790#ifndef RT30xx
791 pObj->RTUSBCmdThr_task = NULL; 791 pObj->RTUSBCmdThr_pid= THREAD_PID_INIT_VALUE;
792 tsk = kthread_run(RTUSBCmdThread, pAd, "%s", pAd->net_dev->name); 792 pid_number = kernel_thread(RTUSBCmdThread, pAd, CLONE_VM);
793 793 if (pid_number < 0)
794 if (IS_ERR(tsk) < 0)
795#endif 794#endif
796#ifdef RT30xx 795#ifdef RT30xx
797 pObj->RTUSBCmdThr_pid = NULL; 796 pObj->RTUSBCmdThr_pid = NULL;
@@ -804,7 +803,7 @@ NDIS_STATUS CreateThreads(
804 } 803 }
805 804
806#ifndef RT30xx 805#ifndef RT30xx
807 pObj->RTUSBCmdThr_task = tsk; 806 pObj->RTUSBCmdThr_pid = GET_PID(pid_number);
808#endif 807#endif
809#ifdef RT30xx 808#ifdef RT30xx
810 pObj->RTUSBCmdThr_pid = find_get_pid(pid_number); 809 pObj->RTUSBCmdThr_pid = find_get_pid(pid_number);
@@ -812,9 +811,9 @@ NDIS_STATUS CreateThreads(
812 wait_for_completion(&(pAd->CmdQComplete)); 811 wait_for_completion(&(pAd->CmdQComplete));
813 812
814#ifndef RT30xx 813#ifndef RT30xx
815 pObj->TimerQThr_task = NULL; 814 pObj->TimerQThr_pid= THREAD_PID_INIT_VALUE;
816 tsk = kthread_run(TimerQThread, pAd, "%s", pAd->net_dev->name); 815 pid_number = kernel_thread(TimerQThread, pAd, CLONE_VM);
817 if (IS_ERR(tsk) < 0) 816 if (pid_number < 0)
818#endif 817#endif
819#ifdef RT30xx 818#ifdef RT30xx
820 pObj->TimerQThr_pid = NULL; 819 pObj->TimerQThr_pid = NULL;
@@ -826,7 +825,7 @@ NDIS_STATUS CreateThreads(
826 return NDIS_STATUS_FAILURE; 825 return NDIS_STATUS_FAILURE;
827 } 826 }
828#ifndef RT30xx 827#ifndef RT30xx
829 pObj->TimerQThr_task = tsk; 828 pObj->TimerQThr_pid = GET_PID(pid_number);
830#endif 829#endif
831#ifdef RT30xx 830#ifdef RT30xx
832 pObj->TimerQThr_pid = find_get_pid(pid_number); 831 pObj->TimerQThr_pid = find_get_pid(pid_number);
diff --git a/drivers/staging/rt2870/common/rtusb_io.c b/drivers/staging/rt2870/common/rtusb_io.c
index fd1b0c18f2a0..704b5c2d5091 100644
--- a/drivers/staging/rt2870/common/rtusb_io.c
+++ b/drivers/staging/rt2870/common/rtusb_io.c
@@ -984,8 +984,7 @@ NDIS_STATUS RTUSBEnqueueCmdFromNdis(
984 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; 984 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie;
985 985
986#ifndef RT30xx 986#ifndef RT30xx
987 BUG_ON(pObj->RTUSBCmdThr_task == NULL); 987 CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid)
988 CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task))
989#endif 988#endif
990#ifdef RT30xx 989#ifdef RT30xx
991 if (pObj->RTUSBCmdThr_pid < 0) 990 if (pObj->RTUSBCmdThr_pid < 0)
diff --git a/drivers/staging/rt2870/rt2870.h b/drivers/staging/rt2870/rt2870.h
index 5e5b3f2b7eb1..2b8872b2fd9d 100644
--- a/drivers/staging/rt2870/rt2870.h
+++ b/drivers/staging/rt2870/rt2870.h
@@ -79,6 +79,7 @@
79{ \ 79{ \
80 {USB_DEVICE(0x148F,0x2770)}, /* Ralink */ \ 80 {USB_DEVICE(0x148F,0x2770)}, /* Ralink */ \
81 {USB_DEVICE(0x1737,0x0071)}, /* Linksys WUSB600N */ \ 81 {USB_DEVICE(0x1737,0x0071)}, /* Linksys WUSB600N */ \
82 {USB_DEVICE(0x1737,0x0070)}, /* Linksys */ \
82 {USB_DEVICE(0x148F,0x2870)}, /* Ralink */ \ 83 {USB_DEVICE(0x148F,0x2870)}, /* Ralink */ \
83 {USB_DEVICE(0x148F,0x3070)}, /* Ralink */ \ 84 {USB_DEVICE(0x148F,0x3070)}, /* Ralink */ \
84 {USB_DEVICE(0x0B05,0x1731)}, /* Asus */ \ 85 {USB_DEVICE(0x0B05,0x1731)}, /* Asus */ \
@@ -89,15 +90,18 @@
89 {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \ 90 {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \
90 {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \ 91 {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \
91 {USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \ 92 {USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \
93 {USB_DEVICE(0x0DF6,0x003F)}, /* Sitecom WL-608 */ \
92 {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ 94 {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \
93 {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ 95 {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \
94 {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \ 96 {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \
97 {USB_DEVICE(0x2019,0xED14)}, /* Planex Communications, Inc. */ \
95 {USB_DEVICE(0x2019,0xAB25)}, /* Planex Communications, Inc. RT3070 */ \ 98 {USB_DEVICE(0x2019,0xAB25)}, /* Planex Communications, Inc. RT3070 */ \
96 {USB_DEVICE(0x07D1,0x3C09)}, /* D-Link */ \ 99 {USB_DEVICE(0x07D1,0x3C09)}, /* D-Link */ \
97 {USB_DEVICE(0x07D1,0x3C11)}, /* D-Link */ \ 100 {USB_DEVICE(0x07D1,0x3C11)}, /* D-Link */ \
98 {USB_DEVICE(0x14B2,0x3C07)}, /* AL */ \ 101 {USB_DEVICE(0x14B2,0x3C07)}, /* AL */ \
99 {USB_DEVICE(0x14B2,0x3C12)}, /* AL */ \ 102 {USB_DEVICE(0x14B2,0x3C12)}, /* AL */ \
100 {USB_DEVICE(0x050D,0x8053)}, /* Belkin */ \ 103 {USB_DEVICE(0x050D,0x8053)}, /* Belkin */ \
104 {USB_DEVICE(0x050D,0x815C)}, /* Belkin */ \
101 {USB_DEVICE(0x14B2,0x3C23)}, /* Airlink */ \ 105 {USB_DEVICE(0x14B2,0x3C23)}, /* Airlink */ \
102 {USB_DEVICE(0x14B2,0x3C27)}, /* Airlink */ \ 106 {USB_DEVICE(0x14B2,0x3C27)}, /* Airlink */ \
103 {USB_DEVICE(0x07AA,0x002F)}, /* Corega */ \ 107 {USB_DEVICE(0x07AA,0x002F)}, /* Corega */ \
@@ -586,16 +590,14 @@ VOID RTUSBBulkRxComplete(purbb_t pUrb, struct pt_regs *pt_regs);
586#define RTUSBMlmeUp(pAd) \ 590#define RTUSBMlmeUp(pAd) \
587{ \ 591{ \
588 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ 592 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \
589 BUG_ON(pObj->MLMEThr_task == NULL); \ 593 CHECK_PID_LEGALITY(pObj->MLMEThr_pid) \
590 CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task)) \
591 up(&(pAd->mlme_semaphore)); \ 594 up(&(pAd->mlme_semaphore)); \
592} 595}
593 596
594#define RTUSBCMDUp(pAd) \ 597#define RTUSBCMDUp(pAd) \
595{ \ 598{ \
596 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ 599 POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \
597 BUG_ON(pObj->RTUSBCmdThr_task == NULL); \ 600 CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) \
598 CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) \
599 up(&(pAd->RTUSBCmd_semaphore)); \ 601 up(&(pAd->RTUSBCmd_semaphore)); \
600} 602}
601#endif 603#endif
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
index 93af37e2d31a..54b4b718f84a 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
@@ -461,19 +461,19 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee,
461 struct iw_request_info *info, 461 struct iw_request_info *info,
462 union iwreq_data *wrqu, char *extra) 462 union iwreq_data *wrqu, char *extra)
463{ 463{
464 strcpy(wrqu->name, "802.11"); 464 strlcpy(wrqu->name, "802.11", IFNAMSIZ);
465 if(ieee->modulation & IEEE80211_CCK_MODULATION){ 465 if(ieee->modulation & IEEE80211_CCK_MODULATION){
466 strcat(wrqu->name, "b"); 466 strlcat(wrqu->name, "b", IFNAMSIZ);
467 if(ieee->modulation & IEEE80211_OFDM_MODULATION) 467 if(ieee->modulation & IEEE80211_OFDM_MODULATION)
468 strcat(wrqu->name, "/g"); 468 strlcat(wrqu->name, "/g", IFNAMSIZ);
469 }else if(ieee->modulation & IEEE80211_OFDM_MODULATION) 469 }else if(ieee->modulation & IEEE80211_OFDM_MODULATION)
470 strcat(wrqu->name, "g"); 470 strlcat(wrqu->name, "g", IFNAMSIZ);
471 471
472 if((ieee->state == IEEE80211_LINKED) || 472 if((ieee->state == IEEE80211_LINKED) ||
473 (ieee->state == IEEE80211_LINKED_SCANNING)) 473 (ieee->state == IEEE80211_LINKED_SCANNING))
474 strcat(wrqu->name," linked"); 474 strlcat(wrqu->name," link", IFNAMSIZ);
475 else if(ieee->state != IEEE80211_NOLINK) 475 else if(ieee->state != IEEE80211_NOLINK)
476 strcat(wrqu->name," link.."); 476 strlcat(wrqu->name," .....", IFNAMSIZ);
477 477
478 478
479 return 0; 479 return 0;
diff --git a/drivers/staging/rtl8192su/Kconfig b/drivers/staging/rtl8192su/Kconfig
index 4b5552c5926e..770f41280f21 100644
--- a/drivers/staging/rtl8192su/Kconfig
+++ b/drivers/staging/rtl8192su/Kconfig
@@ -1,6 +1,6 @@
1config RTL8192SU 1config RTL8192SU
2 tristate "RealTek RTL8192SU Wireless LAN NIC driver" 2 tristate "RealTek RTL8192SU Wireless LAN NIC driver"
3 depends on PCI 3 depends on PCI
4 depends on WIRELESS_EXT && COMPAT_NET_DEV_OPS 4 depends on WIRELESS_EXT
5 default N 5 default N
6 ---help--- 6 ---help---
diff --git a/drivers/staging/rtl8192su/ieee80211.h b/drivers/staging/rtl8192su/ieee80211.h
index 0edb09a536f9..ea9739318037 100644
--- a/drivers/staging/rtl8192su/ieee80211.h
+++ b/drivers/staging/rtl8192su/ieee80211.h
@@ -2645,7 +2645,7 @@ extern int ieee80211_encrypt_fragment(
2645 struct sk_buff *frag, 2645 struct sk_buff *frag,
2646 int hdr_len); 2646 int hdr_len);
2647 2647
2648extern int ieee80211_xmit(struct sk_buff *skb, 2648extern int rtl8192_ieee80211_xmit(struct sk_buff *skb,
2649 struct net_device *dev); 2649 struct net_device *dev);
2650extern void ieee80211_txb_free(struct ieee80211_txb *); 2650extern void ieee80211_txb_free(struct ieee80211_txb *);
2651 2651
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
index 720bfcbfadc1..5e3a2cbed2b1 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
@@ -2645,7 +2645,7 @@ extern int ieee80211_encrypt_fragment(
2645 struct sk_buff *frag, 2645 struct sk_buff *frag,
2646 int hdr_len); 2646 int hdr_len);
2647 2647
2648extern int ieee80211_xmit(struct sk_buff *skb, 2648extern int rtl8192_ieee80211_xmit(struct sk_buff *skb,
2649 struct net_device *dev); 2649 struct net_device *dev);
2650extern void ieee80211_txb_free(struct ieee80211_txb *); 2650extern void ieee80211_txb_free(struct ieee80211_txb *);
2651 2651
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
index f408b4583b82..759032db4a34 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
@@ -118,7 +118,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
118#else 118#else
119 ieee = (struct ieee80211_device *)dev->priv; 119 ieee = (struct ieee80211_device *)dev->priv;
120#endif 120#endif
121 dev->hard_start_xmit = ieee80211_xmit;
122 121
123 memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv); 122 memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv);
124 ieee->dev = dev; 123 ieee->dev = dev;
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c
index 1f50c46dcb90..191dc3fbbe32 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c
@@ -548,21 +548,21 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee,
548 struct iw_request_info *info, 548 struct iw_request_info *info,
549 union iwreq_data *wrqu, char *extra) 549 union iwreq_data *wrqu, char *extra)
550{ 550{
551 strcpy(wrqu->name, "802.11"); 551 strlcpy(wrqu->name, "802.11", IFNAMSIZ);
552 if(ieee->modulation & IEEE80211_CCK_MODULATION){ 552 if(ieee->modulation & IEEE80211_CCK_MODULATION){
553 strcat(wrqu->name, "b"); 553 strlcat(wrqu->name, "b", IFNAMSIZ);
554 if(ieee->modulation & IEEE80211_OFDM_MODULATION) 554 if(ieee->modulation & IEEE80211_OFDM_MODULATION)
555 strcat(wrqu->name, "/g"); 555 strlcat(wrqu->name, "/g", IFNAMSIZ);
556 }else if(ieee->modulation & IEEE80211_OFDM_MODULATION) 556 }else if(ieee->modulation & IEEE80211_OFDM_MODULATION)
557 strcat(wrqu->name, "g"); 557 strlcat(wrqu->name, "g", IFNAMSIZ);
558 if (ieee->mode & (IEEE_N_24G | IEEE_N_5G)) 558 if (ieee->mode & (IEEE_N_24G | IEEE_N_5G))
559 strcat(wrqu->name, "/n"); 559 strlcat(wrqu->name, "/n", IFNAMSIZ);
560 560
561 if((ieee->state == IEEE80211_LINKED) || 561 if((ieee->state == IEEE80211_LINKED) ||
562 (ieee->state == IEEE80211_LINKED_SCANNING)) 562 (ieee->state == IEEE80211_LINKED_SCANNING))
563 strcat(wrqu->name," linked"); 563 strlcat(wrqu->name, " link", IFNAMSIZ);
564 else if(ieee->state != IEEE80211_NOLINK) 564 else if(ieee->state != IEEE80211_NOLINK)
565 strcat(wrqu->name," link.."); 565 strlcat(wrqu->name, " .....", IFNAMSIZ);
566 566
567 567
568 return 0; 568 return 0;
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
index 7294572b990f..cba12b84be5c 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
@@ -618,7 +618,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u
618 } 618 }
619} 619}
620 620
621int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) 621int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
622{ 622{
623#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) 623#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
624 struct ieee80211_device *ieee = netdev_priv(dev); 624 struct ieee80211_device *ieee = netdev_priv(dev);
@@ -943,5 +943,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
943 return 1; 943 return 1;
944 944
945} 945}
946EXPORT_SYMBOL(rtl8192_ieee80211_xmit);
946 947
947EXPORT_SYMBOL(ieee80211_txb_free); 948EXPORT_SYMBOL(ieee80211_txb_free);
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index f1423d714496..70f81a8f1291 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -12132,6 +12132,19 @@ static void HalUsbSetQueuePipeMapping8192SUsb(struct usb_interface *intf, struct
12132} 12132}
12133#endif 12133#endif
12134 12134
12135static const struct net_device_ops rtl8192_netdev_ops = {
12136 .ndo_open = rtl8192_open,
12137 .ndo_stop = rtl8192_close,
12138 .ndo_get_stats = rtl8192_stats,
12139 .ndo_tx_timeout = tx_timeout,
12140 .ndo_do_ioctl = rtl8192_ioctl,
12141 .ndo_set_multicast_list = r8192_set_multicast,
12142 .ndo_set_mac_address = r8192_set_mac_adr,
12143 .ndo_validate_addr = eth_validate_addr,
12144 .ndo_change_mtu = eth_change_mtu,
12145 .ndo_start_xmit = rtl8192_ieee80211_xmit,
12146};
12147
12135#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) 12148#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
12136static int __devinit rtl8192_usb_probe(struct usb_interface *intf, 12149static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
12137 const struct usb_device_id *id) 12150 const struct usb_device_id *id)
@@ -12186,15 +12199,7 @@ static void * __devinit rtl8192_usb_probe(struct usb_device *udev,
12186 priv->ops = &rtl8192u_ops; 12199 priv->ops = &rtl8192u_ops;
12187#endif 12200#endif
12188 12201
12189 dev->open = rtl8192_open; 12202 dev->netdev_ops = &rtl8192_netdev_ops;
12190 dev->stop = rtl8192_close;
12191 //dev->hard_start_xmit = rtl8192_8023_hard_start_xmit;
12192 dev->tx_timeout = tx_timeout;
12193 //dev->wireless_handlers = &r8192_wx_handlers_def;
12194 dev->do_ioctl = rtl8192_ioctl;
12195 dev->set_multicast_list = r8192_set_multicast;
12196 dev->set_mac_address = r8192_set_mac_adr;
12197 dev->get_stats = rtl8192_stats;
12198 12203
12199 //DMESG("Oops: i'm coming\n"); 12204 //DMESG("Oops: i'm coming\n");
12200#if WIRELESS_EXT >= 12 12205#if WIRELESS_EXT >= 12
diff --git a/drivers/staging/rtl8192su/r8192U_pm.c b/drivers/staging/rtl8192su/r8192U_pm.c
index 92c95aa36638..b1531a8d0cde 100644
--- a/drivers/staging/rtl8192su/r8192U_pm.c
+++ b/drivers/staging/rtl8192su/r8192U_pm.c
@@ -35,7 +35,9 @@ int rtl8192U_suspend(struct usb_interface *intf, pm_message_t state)
35 return 0; 35 return 0;
36 } 36 }
37 37
38 dev->stop(dev); 38 if (dev->netdev_ops->ndo_stop)
39 dev->netdev_ops->ndo_stop(dev);
40
39 mdelay(10); 41 mdelay(10);
40 42
41 netif_device_detach(dev); 43 netif_device_detach(dev);
@@ -61,7 +63,9 @@ int rtl8192U_resume (struct usb_interface *intf)
61 } 63 }
62 64
63 netif_device_attach(dev); 65 netif_device_attach(dev);
64 dev->open(dev); 66
67 if (dev->netdev_ops->ndo_open)
68 dev->netdev_ops->ndo_open(dev);
65 } 69 }
66 70
67 return 0; 71 return 0;
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 90b29b564631..0fdf8c6dc648 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -360,18 +360,18 @@ static void qt_read_bulk_callback(struct urb *urb)
360 if (port_paranoia_check(port, __func__) != 0) { 360 if (port_paranoia_check(port, __func__) != 0) {
361 dbg("%s - port_paranoia_check, exiting\n", __func__); 361 dbg("%s - port_paranoia_check, exiting\n", __func__);
362 qt_port->ReadBulkStopped = 1; 362 qt_port->ReadBulkStopped = 1;
363 return; 363 goto exit;
364 } 364 }
365 365
366 if (!serial) { 366 if (!serial) {
367 dbg("%s - bad serial pointer, exiting\n", __func__); 367 dbg("%s - bad serial pointer, exiting\n", __func__);
368 return; 368 goto exit;
369 } 369 }
370 if (qt_port->closePending == 1) { 370 if (qt_port->closePending == 1) {
371 /* Were closing , stop reading */ 371 /* Were closing , stop reading */
372 dbg("%s - (qt_port->closepending == 1\n", __func__); 372 dbg("%s - (qt_port->closepending == 1\n", __func__);
373 qt_port->ReadBulkStopped = 1; 373 qt_port->ReadBulkStopped = 1;
374 return; 374 goto exit;
375 } 375 }
376 376
377 /* 377 /*
@@ -381,7 +381,7 @@ static void qt_read_bulk_callback(struct urb *urb)
381 */ 381 */
382 if (qt_port->RxHolding == 1) { 382 if (qt_port->RxHolding == 1) {
383 qt_port->ReadBulkStopped = 1; 383 qt_port->ReadBulkStopped = 1;
384 return; 384 goto exit;
385 } 385 }
386 386
387 if (urb->status) { 387 if (urb->status) {
@@ -389,7 +389,7 @@ static void qt_read_bulk_callback(struct urb *urb)
389 389
390 dbg("%s - nonzero read bulk status received: %d\n", 390 dbg("%s - nonzero read bulk status received: %d\n",
391 __func__, urb->status); 391 __func__, urb->status);
392 return; 392 goto exit;
393 } 393 }
394 394
395 if (tty && RxCount) { 395 if (tty && RxCount) {
@@ -463,6 +463,8 @@ static void qt_read_bulk_callback(struct urb *urb)
463 } 463 }
464 464
465 schedule_work(&port->work); 465 schedule_work(&port->work);
466exit:
467 tty_kref_put(tty);
466} 468}
467 469
468/* 470/*
@@ -736,6 +738,11 @@ static int qt_startup(struct usb_serial *serial)
736 if (!qt_port) { 738 if (!qt_port) {
737 dbg("%s: kmalloc for quatech_port (%d) failed!.", 739 dbg("%s: kmalloc for quatech_port (%d) failed!.",
738 __func__, i); 740 __func__, i);
741 for(--i; i >= 0; i--) {
742 port = serial->port[i];
743 kfree(usb_get_serial_port_data(port));
744 usb_set_serial_port_data(port, NULL);
745 }
739 return -ENOMEM; 746 return -ENOMEM;
740 } 747 }
741 spin_lock_init(&qt_port->lock); 748 spin_lock_init(&qt_port->lock);
@@ -866,7 +873,7 @@ static void qt_release(struct usb_serial *serial)
866 873
867} 874}
868 875
869int qt_open(struct tty_struct *tty, 876static int qt_open(struct tty_struct *tty,
870 struct usb_serial_port *port, struct file *filp) 877 struct usb_serial_port *port, struct file *filp)
871{ 878{
872 struct usb_serial *serial; 879 struct usb_serial *serial;
@@ -1041,17 +1048,19 @@ static void qt_block_until_empty(struct tty_struct *tty,
1041 } 1048 }
1042} 1049}
1043 1050
1044static void qt_close(struct tty_struct *tty, struct usb_serial_port *port, 1051static void qt_close(struct usb_serial_port *port)
1045 struct file *filp)
1046{ 1052{
1047 struct usb_serial *serial = port->serial; 1053 struct usb_serial *serial = port->serial;
1048 struct quatech_port *qt_port; 1054 struct quatech_port *qt_port;
1049 struct quatech_port *port0; 1055 struct quatech_port *port0;
1056 struct tty_struct *tty;
1050 int status; 1057 int status;
1051 unsigned int index; 1058 unsigned int index;
1052 status = 0; 1059 status = 0;
1053 1060
1054 dbg("%s - port %d\n", __func__, port->number); 1061 dbg("%s - port %d\n", __func__, port->number);
1062
1063 tty = tty_port_tty_get(&port->port);
1055 index = tty->index - serial->minor; 1064 index = tty->index - serial->minor;
1056 1065
1057 qt_port = qt_get_port_private(port); 1066 qt_port = qt_get_port_private(port);
@@ -1066,6 +1075,7 @@ static void qt_close(struct tty_struct *tty, struct usb_serial_port *port,
1066 /* wait up to for transmitter to empty */ 1075 /* wait up to for transmitter to empty */
1067 if (serial->dev) 1076 if (serial->dev)
1068 qt_block_until_empty(tty, qt_port); 1077 qt_block_until_empty(tty, qt_port);
1078 tty_kref_put(tty);
1069 1079
1070 /* Close uart channel */ 1080 /* Close uart channel */
1071 status = qt_close_channel(serial, index); 1081 status = qt_close_channel(serial, index);
diff --git a/drivers/staging/stlc45xx/stlc45xx.c b/drivers/staging/stlc45xx/stlc45xx.c
index cfdaac9b747e..a137c78fac09 100644
--- a/drivers/staging/stlc45xx/stlc45xx.c
+++ b/drivers/staging/stlc45xx/stlc45xx.c
@@ -2235,24 +2235,6 @@ static void stlc45xx_op_remove_interface(struct ieee80211_hw *hw,
2235 stlc45xx_debug(DEBUG_FUNC, "%s", __func__); 2235 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2236} 2236}
2237 2237
2238static int stlc45xx_op_config_interface(struct ieee80211_hw *hw,
2239 struct ieee80211_vif *vif,
2240 struct ieee80211_if_conf *conf)
2241{
2242 struct stlc45xx *stlc = hw->priv;
2243
2244 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2245
2246 mutex_lock(&stlc->mutex);
2247
2248 memcpy(stlc->bssid, conf->bssid, ETH_ALEN);
2249 stlc45xx_tx_setup(stlc);
2250
2251 mutex_unlock(&stlc->mutex);
2252
2253 return 0;
2254}
2255
2256static int stlc45xx_op_config(struct ieee80211_hw *hw, u32 changed) 2238static int stlc45xx_op_config(struct ieee80211_hw *hw, u32 changed)
2257{ 2239{
2258 struct stlc45xx *stlc = hw->priv; 2240 struct stlc45xx *stlc = hw->priv;
@@ -2295,6 +2277,14 @@ static void stlc45xx_op_bss_info_changed(struct ieee80211_hw *hw,
2295{ 2277{
2296 struct stlc45xx *stlc = hw->priv; 2278 struct stlc45xx *stlc = hw->priv;
2297 2279
2280 stlc45xx_debug(DEBUG_FUNC, "%s", __func__);
2281 mutex_lock(&stlc->mutex);
2282
2283 memcpy(stlc->bssid, info->bssid, ETH_ALEN);
2284 stlc45xx_tx_setup(stlc);
2285
2286 mutex_unlock(&stlc->mutex);
2287
2298 if (changed & BSS_CHANGED_ASSOC) { 2288 if (changed & BSS_CHANGED_ASSOC) {
2299 stlc->associated = info->assoc; 2289 stlc->associated = info->assoc;
2300 if (info->assoc) 2290 if (info->assoc)
@@ -2357,7 +2347,6 @@ static const struct ieee80211_ops stlc45xx_ops = {
2357 .add_interface = stlc45xx_op_add_interface, 2347 .add_interface = stlc45xx_op_add_interface,
2358 .remove_interface = stlc45xx_op_remove_interface, 2348 .remove_interface = stlc45xx_op_remove_interface,
2359 .config = stlc45xx_op_config, 2349 .config = stlc45xx_op_config,
2360 .config_interface = stlc45xx_op_config_interface,
2361 .configure_filter = stlc45xx_op_configure_filter, 2350 .configure_filter = stlc45xx_op_configure_filter,
2362 .tx = stlc45xx_op_tx, 2351 .tx = stlc45xx_op_tx,
2363 .bss_info_changed = stlc45xx_op_bss_info_changed, 2352 .bss_info_changed = stlc45xx_op_bss_info_changed,
diff --git a/drivers/staging/uc2322/Kconfig b/drivers/staging/uc2322/Kconfig
deleted file mode 100644
index 2e0c6e79df2b..000000000000
--- a/drivers/staging/uc2322/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config USB_SERIAL_ATEN2011
2 tristate "ATEN 2011 USB to serial device support"
3 depends on USB_SERIAL
4 default N
5 ---help---
6 Say Y here if you want to use a ATEN 2011 dual port USB to serial
7 adapter.
8
9 To compile this driver as a module, choose M here: the module will be
10 called aten2011.
diff --git a/drivers/staging/uc2322/Makefile b/drivers/staging/uc2322/Makefile
deleted file mode 100644
index 49c18d6e579f..000000000000
--- a/drivers/staging/uc2322/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_USB_SERIAL_ATEN2011) += aten2011.o
diff --git a/drivers/staging/uc2322/TODO b/drivers/staging/uc2322/TODO
deleted file mode 100644
index c189a64c4185..000000000000
--- a/drivers/staging/uc2322/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
1TODO:
2 - checkpatch.pl cleanups
3 - remove dead and useless code (auditing the tty ioctls to
4 verify that they really are correct and needed.)
5
6Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and
7Russell Lang <gsview@ghostgum.com.au>.
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c
deleted file mode 100644
index 39d0926d1a90..000000000000
--- a/drivers/staging/uc2322/aten2011.c
+++ /dev/null
@@ -1,2430 +0,0 @@
1/*
2 * Aten 2011 USB serial driver for 4 port devices
3 *
4 * Copyright (C) 2000 Inside Out Networks
5 * Copyright (C) 2001-2002, 2009 Greg Kroah-Hartman <greg@kroah.com>
6 * Copyright (C) 2009 Novell Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/tty.h>
20#include <linux/tty_driver.h>
21#include <linux/tty_flip.h>
22#include <linux/module.h>
23#include <linux/serial.h>
24#include <linux/uaccess.h>
25#include <linux/usb.h>
26#include <linux/usb/serial.h>
27
28
29#define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */
30#define ZLP_REG2 0x3B /* Zero_Flag_Reg2 59 */
31#define ZLP_REG3 0x3C /* Zero_Flag_Reg3 60 */
32#define ZLP_REG4 0x3D /* Zero_Flag_Reg4 61 */
33#define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */
34
35/* Interrupt Rotinue Defines */
36#define SERIAL_IIR_RLS 0x06
37#define SERIAL_IIR_RDA 0x04
38#define SERIAL_IIR_CTI 0x0c
39#define SERIAL_IIR_THR 0x02
40#define SERIAL_IIR_MS 0x00
41
42/* Emulation of the bit mask on the LINE STATUS REGISTER. */
43#define SERIAL_LSR_DR 0x0001
44#define SERIAL_LSR_OE 0x0002
45#define SERIAL_LSR_PE 0x0004
46#define SERIAL_LSR_FE 0x0008
47#define SERIAL_LSR_BI 0x0010
48#define SERIAL_LSR_THRE 0x0020
49#define SERIAL_LSR_TEMT 0x0040
50#define SERIAL_LSR_FIFOERR 0x0080
51
52/* MSR bit defines(place holders) */
53#define ATEN_MSR_DELTA_CTS 0x10
54#define ATEN_MSR_DELTA_DSR 0x20
55#define ATEN_MSR_DELTA_RI 0x40
56#define ATEN_MSR_DELTA_CD 0x80
57
58/* Serial Port register Address */
59#define RECEIVE_BUFFER_REGISTER ((__u16)(0x00))
60#define TRANSMIT_HOLDING_REGISTER ((__u16)(0x00))
61#define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01))
62#define INTERRUPT_IDENT_REGISTER ((__u16)(0x02))
63#define FIFO_CONTROL_REGISTER ((__u16)(0x02))
64#define LINE_CONTROL_REGISTER ((__u16)(0x03))
65#define MODEM_CONTROL_REGISTER ((__u16)(0x04))
66#define LINE_STATUS_REGISTER ((__u16)(0x05))
67#define MODEM_STATUS_REGISTER ((__u16)(0x06))
68#define SCRATCH_PAD_REGISTER ((__u16)(0x07))
69#define DIVISOR_LATCH_LSB ((__u16)(0x00))
70#define DIVISOR_LATCH_MSB ((__u16)(0x01))
71
72#define SP1_REGISTER ((__u16)(0x00))
73#define CONTROL1_REGISTER ((__u16)(0x01))
74#define CLK_MULTI_REGISTER ((__u16)(0x02))
75#define CLK_START_VALUE_REGISTER ((__u16)(0x03))
76#define DCR1_REGISTER ((__u16)(0x04))
77#define GPIO_REGISTER ((__u16)(0x07))
78
79#define SERIAL_LCR_DLAB ((__u16)(0x0080))
80
81/*
82 * URB POOL related defines
83 */
84#define NUM_URBS 16 /* URB Count */
85#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
86
87#define USB_VENDOR_ID_ATENINTL 0x0557
88#define ATENINTL_DEVICE_ID_2011 0x2011
89#define ATENINTL_DEVICE_ID_7820 0x7820
90
91static struct usb_device_id id_table[] = {
92 { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_2011) },
93 { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_7820) },
94 { } /* terminating entry */
95};
96MODULE_DEVICE_TABLE(usb, id_table);
97
98/* This structure holds all of the local port information */
99struct ATENINTL_port {
100 int port_num; /*Actual port number in the device(1,2,etc)*/
101 __u8 bulk_out_endpoint; /* the bulk out endpoint handle */
102 unsigned char *bulk_out_buffer; /* buffer used for the bulk out endpoint */
103 struct urb *write_urb; /* write URB for this port */
104 __u8 bulk_in_endpoint; /* the bulk in endpoint handle */
105 unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */
106 struct urb *read_urb; /* read URB for this port */
107 __u8 shadowLCR; /* last LCR value received */
108 __u8 shadowMCR; /* last MCR value received */
109 char open;
110 char chaseResponsePending;
111 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
112 wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */
113 struct async_icount icount;
114 struct usb_serial_port *port; /* loop back to the owner of this object */
115 /*Offsets*/
116 __u8 SpRegOffset;
117 __u8 ControlRegOffset;
118 __u8 DcrRegOffset;
119 /* for processing control URBS in interrupt context */
120 struct urb *control_urb;
121 char *ctrl_buf;
122 int MsrLsr;
123
124 struct urb *write_urb_pool[NUM_URBS];
125 /* we pass a pointer to this as the arguement sent to cypress_set_termios old_termios */
126 struct ktermios tmp_termios; /* stores the old termios settings */
127 spinlock_t lock; /* private lock */
128};
129
130/* This structure holds all of the individual serial device information */
131struct ATENINTL_serial {
132 __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */
133 unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */
134 struct urb *interrupt_read_urb; /* our interrupt urb */
135 __u8 bulk_in_endpoint; /* the bulk in endpoint handle */
136 unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */
137 struct urb *read_urb; /* our bulk read urb */
138 __u8 bulk_out_endpoint; /* the bulk out endpoint handle */
139 struct usb_serial *serial; /* loop back to the owner of this object */
140 int ATEN2011_spectrum_2or4ports; /* this says the number of ports in the device */
141 /* Indicates about the no.of opened ports of an individual USB-serial adapater. */
142 unsigned int NoOfOpenPorts;
143 /* a flag for Status endpoint polling */
144 unsigned char status_polling_started;
145};
146
147static void ATEN2011_set_termios(struct tty_struct *tty,
148 struct usb_serial_port *port,
149 struct ktermios *old_termios);
150static void ATEN2011_change_port_settings(struct tty_struct *tty,
151 struct ATENINTL_port *ATEN2011_port,
152 struct ktermios *old_termios);
153
154/*************************************
155 * Bit definitions for each register *
156 *************************************/
157#define LCR_BITS_5 0x00 /* 5 bits/char */
158#define LCR_BITS_6 0x01 /* 6 bits/char */
159#define LCR_BITS_7 0x02 /* 7 bits/char */
160#define LCR_BITS_8 0x03 /* 8 bits/char */
161#define LCR_BITS_MASK 0x03 /* Mask for bits/char field */
162
163#define LCR_STOP_1 0x00 /* 1 stop bit */
164#define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */
165#define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */
166#define LCR_STOP_MASK 0x04 /* Mask for stop bits field */
167
168#define LCR_PAR_NONE 0x00 /* No parity */
169#define LCR_PAR_ODD 0x08 /* Odd parity */
170#define LCR_PAR_EVEN 0x18 /* Even parity */
171#define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */
172#define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */
173#define LCR_PAR_MASK 0x38 /* Mask for parity field */
174
175#define LCR_SET_BREAK 0x40 /* Set Break condition */
176#define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */
177
178#define MCR_DTR 0x01 /* Assert DTR */
179#define MCR_RTS 0x02 /* Assert RTS */
180#define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */
181#define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */
182#define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */
183#define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */
184
185#define ATEN2011_MSR_CTS 0x10 /* Current state of CTS */
186#define ATEN2011_MSR_DSR 0x20 /* Current state of DSR */
187#define ATEN2011_MSR_RI 0x40 /* Current state of RI */
188#define ATEN2011_MSR_CD 0x80 /* Current state of CD */
189
190
191static int debug;
192
193/*
194 * Version Information
195 */
196#define DRIVER_VERSION "2.0"
197#define DRIVER_DESC "ATENINTL 2011 USB Serial Adapter"
198
199/*
200 * Defines used for sending commands to port
201 */
202
203#define ATEN_WDR_TIMEOUT (50) /* default urb timeout */
204
205/* Requests */
206#define ATEN_RD_RTYPE 0xC0
207#define ATEN_WR_RTYPE 0x40
208#define ATEN_RDREQ 0x0D
209#define ATEN_WRREQ 0x0E
210#define ATEN_CTRL_TIMEOUT 500
211#define VENDOR_READ_LENGTH (0x01)
212
213/* set to 1 for RS485 mode and 0 for RS232 mode */
214/* FIXME make this somehow dynamic and not build time specific */
215static int RS485mode;
216
217static int set_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 val)
218{
219 struct usb_device *dev = port->serial->dev;
220 val = val & 0x00ff;
221
222 dbg("%s: is %x, value %x", __func__, reg, val);
223
224 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ,
225 ATEN_WR_RTYPE, val, reg, NULL, 0,
226 ATEN_WDR_TIMEOUT);
227}
228
229static int get_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 *val)
230{
231 struct usb_device *dev = port->serial->dev;
232 int ret;
233
234 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ,
235 ATEN_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
236 ATEN_WDR_TIMEOUT);
237 dbg("%s: offset is %x, return val %x", __func__, reg, *val);
238 *val = (*val) & 0x00ff;
239 return ret;
240}
241
242static int set_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 val)
243{
244 struct usb_device *dev = port->serial->dev;
245 struct ATENINTL_serial *a_serial;
246 __u16 minor;
247
248 a_serial = usb_get_serial_data(port->serial);
249 minor = port->serial->minor;
250 if (minor == SERIAL_TTY_NO_MINOR)
251 minor = 0;
252 val = val & 0x00ff;
253
254 /*
255 * For the UART control registers,
256 * the application number need to be Or'ed
257 */
258 if (a_serial->ATEN2011_spectrum_2or4ports == 4)
259 val |= (((__u16)port->number - minor) + 1) << 8;
260 else {
261 if (((__u16) port->number - minor) == 0)
262 val |= (((__u16)port->number - minor) + 1) << 8;
263 else
264 val |= (((__u16)port->number - minor) + 2) << 8;
265 }
266 dbg("%s: application number is %x", __func__, val);
267
268 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ,
269 ATEN_WR_RTYPE, val, reg, NULL, 0,
270 ATEN_WDR_TIMEOUT);
271}
272
273static int get_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 *val)
274{
275 struct usb_device *dev = port->serial->dev;
276 int ret = 0;
277 __u16 wval;
278 struct ATENINTL_serial *a_serial;
279 __u16 minor = port->serial->minor;
280
281 a_serial = usb_get_serial_data(port->serial);
282 if (minor == SERIAL_TTY_NO_MINOR)
283 minor = 0;
284
285 /* wval is same as application number */
286 if (a_serial->ATEN2011_spectrum_2or4ports == 4)
287 wval = (((__u16)port->number - minor) + 1) << 8;
288 else {
289 if (((__u16) port->number - minor) == 0)
290 wval = (((__u16) port->number - minor) + 1) << 8;
291 else
292 wval = (((__u16) port->number - minor) + 2) << 8;
293 }
294 dbg("%s: application number is %x", __func__, wval);
295 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ,
296 ATEN_RD_RTYPE, wval, reg, val, VENDOR_READ_LENGTH,
297 ATEN_WDR_TIMEOUT);
298 *val = (*val) & 0x00ff;
299 return ret;
300}
301
302static int handle_newMsr(struct ATENINTL_port *port, __u8 newMsr)
303{
304 struct ATENINTL_port *ATEN2011_port;
305 struct async_icount *icount;
306 ATEN2011_port = port;
307 icount = &ATEN2011_port->icount;
308 if (newMsr &
309 (ATEN_MSR_DELTA_CTS | ATEN_MSR_DELTA_DSR | ATEN_MSR_DELTA_RI |
310 ATEN_MSR_DELTA_CD)) {
311 icount = &ATEN2011_port->icount;
312
313 /* update input line counters */
314 if (newMsr & ATEN_MSR_DELTA_CTS)
315 icount->cts++;
316 if (newMsr & ATEN_MSR_DELTA_DSR)
317 icount->dsr++;
318 if (newMsr & ATEN_MSR_DELTA_CD)
319 icount->dcd++;
320 if (newMsr & ATEN_MSR_DELTA_RI)
321 icount->rng++;
322 }
323
324 return 0;
325}
326
327static int handle_newLsr(struct ATENINTL_port *port, __u8 newLsr)
328{
329 struct async_icount *icount;
330
331 dbg("%s - %02x", __func__, newLsr);
332
333 if (newLsr & SERIAL_LSR_BI) {
334 /*
335 * Parity and Framing errors only count if they occur exclusive
336 * of a break being received.
337 */
338 newLsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
339 }
340
341 /* update input line counters */
342 icount = &port->icount;
343 if (newLsr & SERIAL_LSR_BI)
344 icount->brk++;
345 if (newLsr & SERIAL_LSR_OE)
346 icount->overrun++;
347 if (newLsr & SERIAL_LSR_PE)
348 icount->parity++;
349 if (newLsr & SERIAL_LSR_FE)
350 icount->frame++;
351
352 return 0;
353}
354
355static void ATEN2011_control_callback(struct urb *urb)
356{
357 unsigned char *data;
358 struct ATENINTL_port *ATEN2011_port;
359 __u8 regval = 0x0;
360
361 switch (urb->status) {
362 case 0:
363 /* success */
364 break;
365 case -ECONNRESET:
366 case -ENOENT:
367 case -ESHUTDOWN:
368 /* this urb is terminated, clean up */
369 dbg("%s - urb shutting down with status: %d", __func__,
370 urb->status);
371 return;
372 default:
373 dbg("%s - nonzero urb status received: %d", __func__,
374 urb->status);
375 goto exit;
376 }
377
378 ATEN2011_port = (struct ATENINTL_port *)urb->context;
379
380 dbg("%s urb buffer size is %d", __func__, urb->actual_length);
381 dbg("%s ATEN2011_port->MsrLsr is %d port %d", __func__,
382 ATEN2011_port->MsrLsr, ATEN2011_port->port_num);
383 data = urb->transfer_buffer;
384 regval = (__u8) data[0];
385 dbg("%s data is %x", __func__, regval);
386 if (ATEN2011_port->MsrLsr == 0)
387 handle_newMsr(ATEN2011_port, regval);
388 else if (ATEN2011_port->MsrLsr == 1)
389 handle_newLsr(ATEN2011_port, regval);
390
391exit:
392 return;
393}
394
395static int ATEN2011_get_reg(struct ATENINTL_port *ATEN, __u16 Wval, __u16 reg,
396 __u16 *val)
397{
398 struct usb_device *dev = ATEN->port->serial->dev;
399 struct usb_ctrlrequest *dr = NULL;
400 unsigned char *buffer = NULL;
401 int ret = 0;
402 buffer = (__u8 *) ATEN->ctrl_buf;
403
404 dr = (void *)(buffer + 2);
405 dr->bRequestType = ATEN_RD_RTYPE;
406 dr->bRequest = ATEN_RDREQ;
407 dr->wValue = cpu_to_le16(Wval);
408 dr->wIndex = cpu_to_le16(reg);
409 dr->wLength = cpu_to_le16(2);
410
411 usb_fill_control_urb(ATEN->control_urb, dev, usb_rcvctrlpipe(dev, 0),
412 (unsigned char *)dr, buffer, 2,
413 ATEN2011_control_callback, ATEN);
414 ATEN->control_urb->transfer_buffer_length = 2;
415 ret = usb_submit_urb(ATEN->control_urb, GFP_ATOMIC);
416 return ret;
417}
418
419static void ATEN2011_interrupt_callback(struct urb *urb)
420{
421 int result;
422 int length;
423 struct ATENINTL_port *ATEN2011_port;
424 struct ATENINTL_serial *ATEN2011_serial;
425 struct usb_serial *serial;
426 __u16 Data;
427 unsigned char *data;
428 __u8 sp[5], st;
429 int i;
430 __u16 wval;
431 int minor;
432
433 dbg("%s", " : Entering");
434
435 ATEN2011_serial = (struct ATENINTL_serial *)urb->context;
436
437 switch (urb->status) {
438 case 0:
439 /* success */
440 break;
441 case -ECONNRESET:
442 case -ENOENT:
443 case -ESHUTDOWN:
444 /* this urb is terminated, clean up */
445 dbg("%s - urb shutting down with status: %d", __func__,
446 urb->status);
447 return;
448 default:
449 dbg("%s - nonzero urb status received: %d", __func__,
450 urb->status);
451 goto exit;
452 }
453 length = urb->actual_length;
454 data = urb->transfer_buffer;
455
456 serial = ATEN2011_serial->serial;
457
458 /* ATENINTL get 5 bytes
459 * Byte 1 IIR Port 1 (port.number is 0)
460 * Byte 2 IIR Port 2 (port.number is 1)
461 * Byte 3 IIR Port 3 (port.number is 2)
462 * Byte 4 IIR Port 4 (port.number is 3)
463 * Byte 5 FIFO status for both */
464
465 if (length && length > 5) {
466 dbg("%s", "Wrong data !!!");
467 return;
468 }
469
470 /* MATRIX */
471 if (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 4) {
472 sp[0] = (__u8) data[0];
473 sp[1] = (__u8) data[1];
474 sp[2] = (__u8) data[2];
475 sp[3] = (__u8) data[3];
476 st = (__u8) data[4];
477 } else {
478 sp[0] = (__u8) data[0];
479 sp[1] = (__u8) data[2];
480 /* sp[2]=(__u8)data[2]; */
481 /* sp[3]=(__u8)data[3]; */
482 st = (__u8) data[4];
483
484 }
485 for (i = 0; i < serial->num_ports; i++) {
486 ATEN2011_port = usb_get_serial_port_data(serial->port[i]);
487 minor = serial->minor;
488 if (minor == SERIAL_TTY_NO_MINOR)
489 minor = 0;
490 if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)
491 && (i != 0))
492 wval =
493 (((__u16) serial->port[i]->number -
494 (__u16) (minor)) + 2) << 8;
495 else
496 wval =
497 (((__u16) serial->port[i]->number -
498 (__u16) (minor)) + 1) << 8;
499 if (ATEN2011_port->open != 0) {
500 if (sp[i] & 0x01) {
501 dbg("SP%d No Interrupt !!!", i);
502 } else {
503 switch (sp[i] & 0x0f) {
504 case SERIAL_IIR_RLS:
505 dbg("Serial Port %d: Receiver status error or address bit detected in 9-bit mode", i);
506 ATEN2011_port->MsrLsr = 1;
507 ATEN2011_get_reg(ATEN2011_port, wval,
508 LINE_STATUS_REGISTER,
509 &Data);
510 break;
511 case SERIAL_IIR_MS:
512 dbg("Serial Port %d: Modem status change", i);
513 ATEN2011_port->MsrLsr = 0;
514 ATEN2011_get_reg(ATEN2011_port, wval,
515 MODEM_STATUS_REGISTER,
516 &Data);
517 break;
518 }
519 }
520 }
521
522 }
523exit:
524 if (ATEN2011_serial->status_polling_started == 0)
525 return;
526
527 result = usb_submit_urb(urb, GFP_ATOMIC);
528 if (result) {
529 dev_err(&urb->dev->dev,
530 "%s - Error %d submitting interrupt urb\n",
531 __func__, result);
532 }
533
534 return;
535}
536
537static void ATEN2011_bulk_in_callback(struct urb *urb)
538{
539 int status;
540 unsigned char *data;
541 struct usb_serial *serial;
542 struct usb_serial_port *port;
543 struct ATENINTL_serial *ATEN2011_serial;
544 struct ATENINTL_port *ATEN2011_port;
545 struct tty_struct *tty;
546
547 if (urb->status) {
548 dbg("nonzero read bulk status received: %d", urb->status);
549 return;
550 }
551
552 ATEN2011_port = (struct ATENINTL_port *)urb->context;
553
554 port = (struct usb_serial_port *)ATEN2011_port->port;
555 serial = port->serial;
556
557 dbg("%s", "Entering...");
558
559 data = urb->transfer_buffer;
560 ATEN2011_serial = usb_get_serial_data(serial);
561
562 if (urb->actual_length) {
563 tty = tty_port_tty_get(&ATEN2011_port->port->port);
564 if (tty) {
565 tty_buffer_request_room(tty, urb->actual_length);
566 tty_insert_flip_string(tty, data, urb->actual_length);
567 tty_flip_buffer_push(tty);
568 tty_kref_put(tty);
569 }
570
571 ATEN2011_port->icount.rx += urb->actual_length;
572 dbg("ATEN2011_port->icount.rx is %d:",
573 ATEN2011_port->icount.rx);
574 }
575
576 if (!ATEN2011_port->read_urb) {
577 dbg("%s", "URB KILLED !!!");
578 return;
579 }
580
581 if (ATEN2011_port->read_urb->status != -EINPROGRESS) {
582 ATEN2011_port->read_urb->dev = serial->dev;
583
584 status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC);
585 if (status)
586 dbg("usb_submit_urb(read bulk) failed, status = %d", status);
587 }
588}
589
590static void ATEN2011_bulk_out_data_callback(struct urb *urb)
591{
592 struct ATENINTL_port *ATEN2011_port;
593 struct tty_struct *tty;
594
595 if (urb->status) {
596 dbg("nonzero write bulk status received:%d", urb->status);
597 return;
598 }
599
600 ATEN2011_port = (struct ATENINTL_port *)urb->context;
601
602 dbg("%s", "Entering .........");
603
604 tty = tty_port_tty_get(&ATEN2011_port->port->port);
605
606 if (tty && ATEN2011_port->open)
607 /* tell the tty driver that something has changed */
608 tty_wakeup(tty);
609
610 /* schedule_work(&ATEN2011_port->port->work); */
611 tty_kref_put(tty);
612
613}
614
615#ifdef ATENSerialProbe
616static int ATEN2011_serial_probe(struct usb_serial *serial,
617 const struct usb_device_id *id)
618{
619
620 /*need to implement the mode_reg reading and updating\
621 structures usb_serial_ device_type\
622 (i.e num_ports, num_bulkin,bulkout etc) */
623 /* Also we can update the changes attach */
624 return 1;
625}
626#endif
627
628static int ATEN2011_open(struct tty_struct *tty, struct usb_serial_port *port,
629 struct file *filp)
630{
631 int response;
632 int j;
633 struct usb_serial *serial;
634 struct urb *urb;
635 __u16 Data;
636 int status;
637 struct ATENINTL_serial *ATEN2011_serial;
638 struct ATENINTL_port *ATEN2011_port;
639 struct ktermios tmp_termios;
640 int minor;
641
642 serial = port->serial;
643
644 ATEN2011_port = usb_get_serial_port_data(port);
645
646 if (ATEN2011_port == NULL)
647 return -ENODEV;
648
649 ATEN2011_serial = usb_get_serial_data(serial);
650 if (ATEN2011_serial == NULL)
651 return -ENODEV;
652
653 /* increment the number of opened ports counter here */
654 ATEN2011_serial->NoOfOpenPorts++;
655
656 usb_clear_halt(serial->dev, port->write_urb->pipe);
657 usb_clear_halt(serial->dev, port->read_urb->pipe);
658
659 /* Initialising the write urb pool */
660 for (j = 0; j < NUM_URBS; ++j) {
661 urb = usb_alloc_urb(0, GFP_ATOMIC);
662 ATEN2011_port->write_urb_pool[j] = urb;
663
664 if (urb == NULL) {
665 err("No more urbs???");
666 continue;
667 }
668
669 urb->transfer_buffer = NULL;
670 urb->transfer_buffer =
671 kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
672 if (!urb->transfer_buffer) {
673 err("%s-out of memory for urb buffers.", __func__);
674 continue;
675 }
676 }
677
678/*****************************************************************************
679 * Initialize ATEN2011 -- Write Init values to corresponding Registers
680 *
681 * Register Index
682 * 1 : IER
683 * 2 : FCR
684 * 3 : LCR
685 * 4 : MCR
686 *
687 * 0x08 : SP1/2 Control Reg
688 *****************************************************************************/
689
690/* NEED to check the fallowing Block */
691
692 Data = 0x0;
693 status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data);
694 if (status < 0) {
695 dbg("Reading Spreg failed");
696 return -1;
697 }
698 Data |= 0x80;
699 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
700 if (status < 0) {
701 dbg("writing Spreg failed");
702 return -1;
703 }
704
705 Data &= ~0x80;
706 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
707 if (status < 0) {
708 dbg("writing Spreg failed");
709 return -1;
710 }
711
712/* End of block to be checked */
713/**************************CHECK***************************/
714
715 if (RS485mode == 0)
716 Data = 0xC0;
717 else
718 Data = 0x00;
719 status = set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
720 if (status < 0) {
721 dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x", status);
722 return -1;
723 } else
724 dbg("SCRATCH_PAD_REGISTER Writing success status%d", status);
725
726/**************************CHECK***************************/
727
728 Data = 0x0;
729 status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data);
730 if (status < 0) {
731 dbg("Reading Controlreg failed");
732 return -1;
733 }
734 Data |= 0x08; /* Driver done bit */
735 Data |= 0x20; /* rx_disable */
736 status = 0;
737 status =
738 set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data);
739 if (status < 0) {
740 dbg("writing Controlreg failed");
741 return -1;
742 }
743 /*
744 * do register settings here
745 * Set all regs to the device default values.
746 * First Disable all interrupts.
747 */
748
749 Data = 0x00;
750 status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
751 if (status < 0) {
752 dbg("disableing interrupts failed");
753 return -1;
754 }
755 /* Set FIFO_CONTROL_REGISTER to the default value */
756 Data = 0x00;
757 status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
758 if (status < 0) {
759 dbg("Writing FIFO_CONTROL_REGISTER failed");
760 return -1;
761 }
762
763 Data = 0xcf; /* chk */
764 status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
765 if (status < 0) {
766 dbg("Writing FIFO_CONTROL_REGISTER failed");
767 return -1;
768 }
769
770 Data = 0x03; /* LCR_BITS_8 */
771 status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
772 ATEN2011_port->shadowLCR = Data;
773
774 Data = 0x0b; /* MCR_DTR|MCR_RTS|MCR_MASTER_IE */
775 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
776 ATEN2011_port->shadowMCR = Data;
777
778#ifdef Check
779 Data = 0x00;
780 status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
781 ATEN2011_port->shadowLCR = Data;
782
783 Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */
784 status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
785
786 Data = 0x0c;
787 status = set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
788
789 Data = 0x0;
790 status = set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
791
792 Data = 0x00;
793 status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
794
795/* Data = ATEN2011_port->shadowLCR; */ /* data latch disable */
796 Data = Data & ~SERIAL_LCR_DLAB;
797 status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
798 ATEN2011_port->shadowLCR = Data;
799#endif
800 /* clearing Bulkin and Bulkout Fifo */
801 Data = 0x0;
802 status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data);
803
804 Data = Data | 0x0c;
805 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
806
807 Data = Data & ~0x0c;
808 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
809 /* Finally enable all interrupts */
810 Data = 0x0;
811 Data = 0x0c;
812 status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
813
814 /* clearing rx_disable */
815 Data = 0x0;
816 status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data);
817 Data = Data & ~0x20;
818 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data);
819
820 /* rx_negate */
821 Data = 0x0;
822 status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data);
823 Data = Data | 0x10;
824 status = 0;
825 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data);
826
827 /*
828 * Check to see if we've set up our endpoint info yet
829 * (can't set it up in ATEN2011_startup as the structures
830 * were not set up at that time.)
831 */
832 if (ATEN2011_serial->NoOfOpenPorts == 1) {
833 /* start the status polling here */
834 ATEN2011_serial->status_polling_started = 1;
835 /* If not yet set, Set here */
836 ATEN2011_serial->interrupt_in_buffer =
837 serial->port[0]->interrupt_in_buffer;
838 ATEN2011_serial->interrupt_in_endpoint =
839 serial->port[0]->interrupt_in_endpointAddress;
840 ATEN2011_serial->interrupt_read_urb =
841 serial->port[0]->interrupt_in_urb;
842
843 /* set up interrupt urb */
844 usb_fill_int_urb(ATEN2011_serial->interrupt_read_urb,
845 serial->dev,
846 usb_rcvintpipe(serial->dev,
847 ATEN2011_serial->
848 interrupt_in_endpoint),
849 ATEN2011_serial->interrupt_in_buffer,
850 ATEN2011_serial->interrupt_read_urb->
851 transfer_buffer_length,
852 ATEN2011_interrupt_callback, ATEN2011_serial,
853 ATEN2011_serial->interrupt_read_urb->interval);
854
855 /* start interrupt read for ATEN2011 *
856 * will continue as long as ATEN2011 is connected */
857
858 response =
859 usb_submit_urb(ATEN2011_serial->interrupt_read_urb,
860 GFP_KERNEL);
861 if (response) {
862 dbg("%s - Error %d submitting interrupt urb",
863 __func__, response);
864 }
865
866 }
867
868 /*
869 * See if we've set up our endpoint info yet
870 * (can't set it up in ATEN2011_startup as the
871 * structures were not set up at that time.)
872 */
873
874 dbg("port number is %d", port->number);
875 dbg("serial number is %d", port->serial->minor);
876 dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress);
877 dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress);
878 dbg("Interrupt endpoint is %d",
879 port->interrupt_in_endpointAddress);
880 dbg("port's number in the device is %d", ATEN2011_port->port_num);
881 ATEN2011_port->bulk_in_buffer = port->bulk_in_buffer;
882 ATEN2011_port->bulk_in_endpoint = port->bulk_in_endpointAddress;
883 ATEN2011_port->read_urb = port->read_urb;
884 ATEN2011_port->bulk_out_endpoint = port->bulk_out_endpointAddress;
885
886 minor = port->serial->minor;
887 if (minor == SERIAL_TTY_NO_MINOR)
888 minor = 0;
889
890 /* set up our bulk in urb */
891 if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)
892 && (((__u16) port->number - (__u16) (minor)) != 0)) {
893 usb_fill_bulk_urb(ATEN2011_port->read_urb, serial->dev,
894 usb_rcvbulkpipe(serial->dev,
895 (port->
896 bulk_in_endpointAddress +
897 2)), port->bulk_in_buffer,
898 ATEN2011_port->read_urb->
899 transfer_buffer_length,
900 ATEN2011_bulk_in_callback, ATEN2011_port);
901 } else
902 usb_fill_bulk_urb(ATEN2011_port->read_urb,
903 serial->dev,
904 usb_rcvbulkpipe(serial->dev,
905 port->
906 bulk_in_endpointAddress),
907 port->bulk_in_buffer,
908 ATEN2011_port->read_urb->
909 transfer_buffer_length,
910 ATEN2011_bulk_in_callback, ATEN2011_port);
911
912 dbg("ATEN2011_open: bulkin endpoint is %d",
913 port->bulk_in_endpointAddress);
914 response = usb_submit_urb(ATEN2011_port->read_urb, GFP_KERNEL);
915 if (response) {
916 err("%s - Error %d submitting control urb", __func__,
917 response);
918 }
919
920 /* initialize our wait queues */
921 init_waitqueue_head(&ATEN2011_port->wait_chase);
922 init_waitqueue_head(&ATEN2011_port->wait_command);
923
924 /* initialize our icount structure */
925 memset(&(ATEN2011_port->icount), 0x00, sizeof(ATEN2011_port->icount));
926
927 /* initialize our port settings */
928 ATEN2011_port->shadowMCR = MCR_MASTER_IE; /* Must set to enable ints! */
929 ATEN2011_port->chaseResponsePending = 0;
930 /* send a open port command */
931 ATEN2011_port->open = 1;
932 /* ATEN2011_change_port_settings(ATEN2011_port,old_termios); */
933 /* Setup termios */
934 ATEN2011_set_termios(tty, port, &tmp_termios);
935 ATEN2011_port->icount.tx = 0;
936 ATEN2011_port->icount.rx = 0;
937
938 dbg("usb_serial serial:%x ATEN2011_port:%x\nATEN2011_serial:%x usb_serial_port port:%x",
939 (unsigned int)serial, (unsigned int)ATEN2011_port,
940 (unsigned int)ATEN2011_serial, (unsigned int)port);
941
942 return 0;
943
944}
945
946static int ATEN2011_chars_in_buffer(struct tty_struct *tty)
947{
948 struct usb_serial_port *port = tty->driver_data;
949 int i;
950 int chars = 0;
951 struct ATENINTL_port *ATEN2011_port;
952
953 /* dbg("%s"," ATEN2011_chars_in_buffer:entering ..........."); */
954
955 ATEN2011_port = usb_get_serial_port_data(port);
956 if (ATEN2011_port == NULL) {
957 dbg("%s", "ATEN2011_break:leaving ...........");
958 return -1;
959 }
960
961 for (i = 0; i < NUM_URBS; ++i)
962 if (ATEN2011_port->write_urb_pool[i]->status == -EINPROGRESS)
963 chars += URB_TRANSFER_BUFFER_SIZE;
964
965 dbg("%s - returns %d", __func__, chars);
966 return chars;
967
968}
969
970static void ATEN2011_block_until_tx_empty(struct tty_struct *tty,
971 struct ATENINTL_port *ATEN2011_port)
972{
973 int timeout = HZ / 10;
974 int wait = 30;
975 int count;
976
977 while (1) {
978 count = ATEN2011_chars_in_buffer(tty);
979
980 /* Check for Buffer status */
981 if (count <= 0)
982 return;
983
984 /* Block the thread for a while */
985 interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase,
986 timeout);
987
988 /* No activity.. count down section */
989 wait--;
990 if (wait == 0) {
991 dbg("%s - TIMEOUT", __func__);
992 return;
993 } else {
994 /* Reset timout value back to seconds */
995 wait = 30;
996 }
997 }
998}
999
1000static void ATEN2011_close(struct tty_struct *tty, struct usb_serial_port *port,
1001 struct file *filp)
1002{
1003 struct usb_serial *serial;
1004 struct ATENINTL_serial *ATEN2011_serial;
1005 struct ATENINTL_port *ATEN2011_port;
1006 int no_urbs;
1007 __u16 Data;
1008
1009 dbg("%s", "ATEN2011_close:entering...");
1010 serial = port->serial;
1011
1012 /* take the Adpater and port's private data */
1013 ATEN2011_serial = usb_get_serial_data(serial);
1014 ATEN2011_port = usb_get_serial_port_data(port);
1015 if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL))
1016 return;
1017
1018 if (serial->dev) {
1019 /* flush and block(wait) until tx is empty */
1020 ATEN2011_block_until_tx_empty(tty, ATEN2011_port);
1021 }
1022 /* kill the ports URB's */
1023 for (no_urbs = 0; no_urbs < NUM_URBS; no_urbs++)
1024 usb_kill_urb(ATEN2011_port->write_urb_pool[no_urbs]);
1025 /* Freeing Write URBs */
1026 for (no_urbs = 0; no_urbs < NUM_URBS; ++no_urbs) {
1027 kfree(ATEN2011_port->write_urb_pool[no_urbs]->transfer_buffer);
1028 usb_free_urb(ATEN2011_port->write_urb_pool[no_urbs]);
1029 }
1030 /* While closing the port, shut down all bulk read, write *
1031 * and interrupt reads if they exist */
1032 if (serial->dev) {
1033 if (ATEN2011_port->write_urb) {
1034 dbg("%s", "Shutdown bulk write");
1035 usb_kill_urb(ATEN2011_port->write_urb);
1036 }
1037 if (ATEN2011_port->read_urb) {
1038 dbg("%s", "Shutdown bulk read");
1039 usb_kill_urb(ATEN2011_port->read_urb);
1040 }
1041 if (ATEN2011_port->control_urb) {
1042 dbg("%s", "Shutdown control read");
1043 /* usb_kill_urb (ATEN2011_port->control_urb); */
1044
1045 }
1046 }
1047 /* if(ATEN2011_port->ctrl_buf != NULL) */
1048 /* kfree(ATEN2011_port->ctrl_buf); */
1049 /* decrement the number of open ports counter of this USB-serial adapter */
1050 ATEN2011_serial->NoOfOpenPorts--;
1051 dbg("NoOfOpenPorts in close %d: in port %d",
1052 ATEN2011_serial->NoOfOpenPorts, port->number);
1053 if (ATEN2011_serial->NoOfOpenPorts == 0) {
1054 /* stop the status polling here */
1055 ATEN2011_serial->status_polling_started = 0;
1056 if (ATEN2011_serial->interrupt_read_urb) {
1057 dbg("%s", "Shutdown interrupt_read_urb");
1058 /* ATEN2011_serial->interrupt_in_buffer=NULL; */
1059 /* usb_kill_urb (ATEN2011_serial->interrupt_read_urb); */
1060 }
1061 }
1062 if (ATEN2011_port->write_urb) {
1063 /* if this urb had a transfer buffer already (old tx) free it */
1064 kfree(ATEN2011_port->write_urb->transfer_buffer);
1065 usb_free_urb(ATEN2011_port->write_urb);
1066 }
1067
1068 /* clear the MCR & IER */
1069 Data = 0x00;
1070 set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1071 Data = 0x00;
1072 set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
1073
1074 ATEN2011_port->open = 0;
1075 dbg("%s", "Leaving ............");
1076
1077}
1078
1079static void ATEN2011_block_until_chase_response(struct tty_struct *tty,
1080 struct ATENINTL_port
1081 *ATEN2011_port)
1082{
1083 int timeout = 1 * HZ;
1084 int wait = 10;
1085 int count;
1086
1087 while (1) {
1088 count = ATEN2011_chars_in_buffer(tty);
1089
1090 /* Check for Buffer status */
1091 if (count <= 0) {
1092 ATEN2011_port->chaseResponsePending = 0;
1093 return;
1094 }
1095
1096 /* Block the thread for a while */
1097 interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase,
1098 timeout);
1099 /* No activity.. count down section */
1100 wait--;
1101 if (wait == 0) {
1102 dbg("%s - TIMEOUT", __func__);
1103 return;
1104 } else {
1105 /* Reset timeout value back to seconds */
1106 wait = 10;
1107 }
1108 }
1109
1110}
1111
1112static void ATEN2011_break(struct tty_struct *tty, int break_state)
1113{
1114 struct usb_serial_port *port = tty->driver_data;
1115 unsigned char data;
1116 struct usb_serial *serial;
1117 struct ATENINTL_serial *ATEN2011_serial;
1118 struct ATENINTL_port *ATEN2011_port;
1119
1120 dbg("%s", "Entering ...........");
1121 dbg("ATEN2011_break: Start");
1122
1123 serial = port->serial;
1124
1125 ATEN2011_serial = usb_get_serial_data(serial);
1126 ATEN2011_port = usb_get_serial_port_data(port);
1127
1128 if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL))
1129 return;
1130
1131 /* flush and chase */
1132 ATEN2011_port->chaseResponsePending = 1;
1133
1134 if (serial->dev) {
1135 /* flush and block until tx is empty */
1136 ATEN2011_block_until_chase_response(tty, ATEN2011_port);
1137 }
1138
1139 if (break_state == -1)
1140 data = ATEN2011_port->shadowLCR | LCR_SET_BREAK;
1141 else
1142 data = ATEN2011_port->shadowLCR & ~LCR_SET_BREAK;
1143
1144 ATEN2011_port->shadowLCR = data;
1145 dbg("ATEN2011_break ATEN2011_port->shadowLCR is %x",
1146 ATEN2011_port->shadowLCR);
1147 set_uart_reg(port, LINE_CONTROL_REGISTER, ATEN2011_port->shadowLCR);
1148
1149 return;
1150}
1151
1152static int ATEN2011_write_room(struct tty_struct *tty)
1153{
1154 struct usb_serial_port *port = tty->driver_data;
1155 int i;
1156 int room = 0;
1157 struct ATENINTL_port *ATEN2011_port;
1158
1159 ATEN2011_port = usb_get_serial_port_data(port);
1160 if (ATEN2011_port == NULL) {
1161 dbg("%s", "ATEN2011_write_room:leaving ...........");
1162 return -1;
1163 }
1164
1165 for (i = 0; i < NUM_URBS; ++i)
1166 if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS)
1167 room += URB_TRANSFER_BUFFER_SIZE;
1168
1169 dbg("%s - returns %d", __func__, room);
1170 return room;
1171
1172}
1173
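/*
 * Write path: grab a free URB from the per-port write_urb_pool, copy up
 * to URB_TRANSFER_BUFFER_SIZE bytes of the caller's data into its
 * transfer buffer and submit it to the bulk-out endpoint; completion is
 * handled by ATEN2011_bulk_out_data_callback.
 */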
1174static int ATEN2011_write(struct tty_struct *tty, struct usb_serial_port *port,
1175 const unsigned char *data, int count)
1176{
1177 int status;
1178 int i;
1179 int bytes_sent = 0;
1180 int transfer_size;
1181 int minor;
1182
1183 struct ATENINTL_port *ATEN2011_port;
1184 struct usb_serial *serial;
1185 struct ATENINTL_serial *ATEN2011_serial;
1186 struct urb *urb;
1187 const unsigned char *current_position = data;
1188 unsigned char *data1;
1189 dbg("%s", "entering ...........");
1190
1191 serial = port->serial;
1192
1193 ATEN2011_port = usb_get_serial_port_data(port);
1194 if (ATEN2011_port == NULL) {
1195 dbg("%s", "ATEN2011_port is NULL");
1196 return -1;
1197 }
1198
1199 ATEN2011_serial = usb_get_serial_data(serial);
1200 if (ATEN2011_serial == NULL) {
1201 dbg("%s", "ATEN2011_serial is NULL");
1202 return -1;
1203 }
1204
1205 /* try to find a free urb in the list */
1206 urb = NULL;
1207
1208 for (i = 0; i < NUM_URBS; ++i) {
1209 if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) {
1210 urb = ATEN2011_port->write_urb_pool[i];
1211 dbg("URB:%d", i);
1212 break;
1213 }
1214 }
1215
1216 if (urb == NULL) {
1217 dbg("%s - no more free urbs", __func__);
1218 goto exit;
1219 }
1220
1221 if (urb->transfer_buffer == NULL) {
1222 urb->transfer_buffer =
1223 kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
1224
1225 if (urb->transfer_buffer == NULL) {
1226 err("%s no more kernel memory...", __func__);
1227 goto exit;
1228 }
1229 }
1230 transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
1231
1232 memcpy(urb->transfer_buffer, current_position, transfer_size);
1233 /* usb_serial_debug_data (__FILE__, __func__, transfer_size, urb->transfer_buffer); */
1234
1235 /* fill urb with data and submit */
1236 minor = port->serial->minor;
1237 if (minor == SERIAL_TTY_NO_MINOR)
1238 minor = 0;
1239 if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)
1240 && (((__u16) port->number - (__u16) (minor)) != 0)) {
1241 usb_fill_bulk_urb(urb, ATEN2011_serial->serial->dev,
1242 usb_sndbulkpipe(ATEN2011_serial->serial->dev,
1243 (port->
1244 bulk_out_endpointAddress) +
1245 2), urb->transfer_buffer,
1246 transfer_size,
1247 ATEN2011_bulk_out_data_callback,
1248 ATEN2011_port);
1249 } else
1250
1251 usb_fill_bulk_urb(urb,
1252 ATEN2011_serial->serial->dev,
1253 usb_sndbulkpipe(ATEN2011_serial->serial->dev,
1254 port->
1255 bulk_out_endpointAddress),
1256 urb->transfer_buffer, transfer_size,
1257 ATEN2011_bulk_out_data_callback,
1258 ATEN2011_port);
1259
1260 data1 = urb->transfer_buffer;
1261 dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
1262 /* for(i=0;i < urb->actual_length;i++) */
1263 /* dbg("Data is %c ",data1[i]); */
1264
1265 /* send it down the pipe */
1266 status = usb_submit_urb(urb, GFP_ATOMIC);
1267
1268 if (status) {
1269 err("%s - usb_submit_urb(write bulk) failed with status = %d",
1270 __func__, status);
1271 bytes_sent = status;
1272 goto exit;
1273 }
1274 bytes_sent = transfer_size;
1275 ATEN2011_port->icount.tx += transfer_size;
1276 dbg("ATEN2011_port->icount.tx is %d:", ATEN2011_port->icount.tx);
1277
1278exit:
1279 return bytes_sent;
1280}
1281
1282static void ATEN2011_throttle(struct tty_struct *tty)
1283{
1284 struct usb_serial_port *port = tty->driver_data;
1285 struct ATENINTL_port *ATEN2011_port;
1286 int status;
1287
1288 dbg("- port %d", port->number);
1289
1290 ATEN2011_port = usb_get_serial_port_data(port);
1291
1292 if (ATEN2011_port == NULL)
1293 return;
1294
1295 if (!ATEN2011_port->open) {
1296 dbg("%s", "port not opened");
1297 return;
1298 }
1299
1300 dbg("%s", "Entering .......... ");
1301
1302 if (!tty) {
1303 dbg("%s - no tty available", __func__);
1304 return;
1305 }
1306
1307 /* if we are implementing XON/XOFF, send the stop character */
1308 if (I_IXOFF(tty)) {
1309 unsigned char stop_char = STOP_CHAR(tty);
1310 status = ATEN2011_write(tty, port, &stop_char, 1);
1311 if (status <= 0)
1312 return;
1313 }
1314
1315 /* if we are implementing RTS/CTS, toggle that line */
1316 if (tty->termios->c_cflag & CRTSCTS) {
1317 ATEN2011_port->shadowMCR &= ~MCR_RTS;
1318 status = set_uart_reg(port, MODEM_CONTROL_REGISTER,
1319 ATEN2011_port->shadowMCR);
1320 if (status < 0)
1321 return;
1322 }
1323
1324 return;
1325}
1326
1327static void ATEN2011_unthrottle(struct tty_struct *tty)
1328{
1329 struct usb_serial_port *port = tty->driver_data;
1330 int status;
1331 struct ATENINTL_port *ATEN2011_port = usb_get_serial_port_data(port);
1332
1333 if (ATEN2011_port == NULL)
1334 return;
1335
1336 if (!ATEN2011_port->open) {
1337 dbg("%s - port not opened", __func__);
1338 return;
1339 }
1340
1341 dbg("%s", "Entering .......... ");
1342
1343 if (!tty) {
1344 dbg("%s - no tty available", __func__);
1345 return;
1346 }
1347
1348 /* if we are implementing XON/XOFF, send the start character */
1349 if (I_IXOFF(tty)) {
1350 unsigned char start_char = START_CHAR(tty);
1351 status = ATEN2011_write(tty, port, &start_char, 1);
1352 if (status <= 0)
1353 return;
1354 }
1355
1356 /* if we are implementing RTS/CTS, toggle that line */
1357 if (tty->termios->c_cflag & CRTSCTS) {
1358 ATEN2011_port->shadowMCR |= MCR_RTS;
1359 status = set_uart_reg(port, MODEM_CONTROL_REGISTER,
1360 ATEN2011_port->shadowMCR);
1361 if (status < 0)
1362 return;
1363 }
1364
1365 return;
1366}
1367
1368static int ATEN2011_tiocmget(struct tty_struct *tty, struct file *file)
1369{
1370 struct usb_serial_port *port = tty->driver_data;
1371 struct ATENINTL_port *ATEN2011_port;
1372 unsigned int result;
1373 __u16 msr;
1374 __u16 mcr;
1375 /* unsigned int mcr; */
1376 int status = 0;
1377 ATEN2011_port = usb_get_serial_port_data(port);
1378
1379 dbg("%s - port %d", __func__, port->number);
1380
1381 if (ATEN2011_port == NULL)
1382 return -ENODEV;
1383
1384 status = get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
1385 status = get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
1386 /* mcr = ATEN2011_port->shadowMCR; */
1387 /* COMMENT2: the following three lines are commented out for updating only the MSR values */
1388 result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
1389 | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
1390 | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
1391 | ((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0)
1392 | ((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0)
1393 | ((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0)
1394 | ((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0);
1395
1396 dbg("%s - 0x%04X", __func__, result);
1397
1398 return result;
1399}
1400
1401static int ATEN2011_tiocmset(struct tty_struct *tty, struct file *file,
1402 unsigned int set, unsigned int clear)
1403{
1404 struct usb_serial_port *port = tty->driver_data;
1405 struct ATENINTL_port *ATEN2011_port;
1406 unsigned int mcr;
1407 unsigned int status;
1408
1409 dbg("%s - port %d", __func__, port->number);
1410
1411 ATEN2011_port = usb_get_serial_port_data(port);
1412
1413 if (ATEN2011_port == NULL)
1414 return -ENODEV;
1415
1416 mcr = ATEN2011_port->shadowMCR;
1417 if (clear & TIOCM_RTS)
1418 mcr &= ~MCR_RTS;
1419 if (clear & TIOCM_DTR)
1420 mcr &= ~MCR_DTR;
1421 if (clear & TIOCM_LOOP)
1422 mcr &= ~MCR_LOOPBACK;
1423
1424 if (set & TIOCM_RTS)
1425 mcr |= MCR_RTS;
1426 if (set & TIOCM_DTR)
1427 mcr |= MCR_DTR;
1428 if (set & TIOCM_LOOP)
1429 mcr |= MCR_LOOPBACK;
1430
1431 ATEN2011_port->shadowMCR = mcr;
1432
1433 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
1434 if (status < 0) {
1435 dbg("setting MODEM_CONTROL_REGISTER Failed");
1436 return -1;
1437 }
1438
1439 return 0;
1440}
1441
1442static void ATEN2011_set_termios(struct tty_struct *tty,
1443 struct usb_serial_port *port,
1444 struct ktermios *old_termios)
1445{
1446 int status;
1447 unsigned int cflag;
1448 struct usb_serial *serial;
1449 struct ATENINTL_port *ATEN2011_port;
1450
1451 dbg("ATEN2011_set_termios: START");
1452
1453 serial = port->serial;
1454
1455 ATEN2011_port = usb_get_serial_port_data(port);
1456
1457 if (ATEN2011_port == NULL)
1458 return;
1459
1460 if (!ATEN2011_port->open) {
1461 dbg("%s - port not opened", __func__);
1462 return;
1463 }
1464
1465 dbg("%s", "setting termios - ");
1466
1467 cflag = tty->termios->c_cflag;
1468
1469 dbg("%s - cflag %08x iflag %08x", __func__,
1470 tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag));
1471
1472 if (old_termios) {
1473 dbg("%s - old cflag %08x old iflag %08x", __func__,
1474 old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag));
1475 }
1476
1477 dbg("%s - port %d", __func__, port->number);
1478
1479 /* change the port settings to the new ones specified */
1480
1481 ATEN2011_change_port_settings(tty, ATEN2011_port, old_termios);
1482
1483 if (!ATEN2011_port->read_urb) {
1484 dbg("%s", "URB KILLED !!!!!");
1485 return;
1486 }
1487
1488 if (ATEN2011_port->read_urb->status != -EINPROGRESS) {
1489 ATEN2011_port->read_urb->dev = serial->dev;
1490 status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC);
1491 if (status) {
1492 dbg(" usb_submit_urb(read bulk) failed, status = %d",
1493 status);
1495 }
1496 }
1497 return;
1498}
1499
1500static int get_lsr_info(struct tty_struct *tty,
1501 struct ATENINTL_port *ATEN2011_port,
1502 unsigned int __user *value)
1503{
1504 int count;
1505 unsigned int result = 0;
1506
1507 count = ATEN2011_chars_in_buffer(tty);
1508 if (count == 0) {
1509 dbg("%s -- Empty", __func__);
1510 result = TIOCSER_TEMT;
1511 }
1512
1513 if (copy_to_user(value, &result, sizeof(int)))
1514 return -EFAULT;
1515 return 0;
1516}
1517
1518static int get_number_bytes_avail(struct tty_struct *tty,
1519 struct ATENINTL_port *ATEN2011_port,
1520 unsigned int __user *value)
1521{
1522 unsigned int result = 0;
1523
1524 if (!tty)
1525 return -ENOIOCTLCMD;
1526
1527 result = tty->read_cnt;
1528
1529 dbg("%s(%d) = %d", __func__, ATEN2011_port->port->number, result);
1530 if (copy_to_user(value, &result, sizeof(int)))
1531 return -EFAULT;
1532
1533 return 0;
1534}
1535
1536static int set_modem_info(struct ATENINTL_port *ATEN2011_port, unsigned int cmd,
1537 unsigned int __user *value)
1538{
1539 unsigned int mcr;
1540 unsigned int arg;
1541 __u16 Data;
1542 int status;
1543 struct usb_serial_port *port;
1544
1545 if (ATEN2011_port == NULL)
1546 return -1;
1547
1548 port = (struct usb_serial_port *)ATEN2011_port->port;
1549
1550 mcr = ATEN2011_port->shadowMCR;
1551
1552 if (copy_from_user(&arg, value, sizeof(int)))
1553 return -EFAULT;
1554
1555 switch (cmd) {
1556 case TIOCMBIS:
1557 if (arg & TIOCM_RTS)
1558 mcr |= MCR_RTS;
1559 if (arg & TIOCM_DTR)
1560 mcr |= MCR_DTR;
1561 if (arg & TIOCM_LOOP)
1562 mcr |= MCR_LOOPBACK;
1563 break;
1564
1565 case TIOCMBIC:
1566 if (arg & TIOCM_RTS)
1567 mcr &= ~MCR_RTS;
1568 if (arg & TIOCM_DTR)
1569 mcr &= ~MCR_DTR;
1570 if (arg & TIOCM_LOOP)
1571 mcr &= ~MCR_LOOPBACK;
1572 break;
1573
1574 case TIOCMSET:
1575 /* turn off the RTS and DTR and LOOPBACK
1576 * and then only turn on what was asked to */
1577 mcr &= ~(MCR_RTS | MCR_DTR | MCR_LOOPBACK);
1578 mcr |= ((arg & TIOCM_RTS) ? MCR_RTS : 0);
1579 mcr |= ((arg & TIOCM_DTR) ? MCR_DTR : 0);
1580 mcr |= ((arg & TIOCM_LOOP) ? MCR_LOOPBACK : 0);
1581 break;
1582 }
1583
1584 ATEN2011_port->shadowMCR = mcr;
1585
1586 Data = ATEN2011_port->shadowMCR;
1587 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1588 if (status < 0) {
1589 dbg("setting MODEM_CONTROL_REGISTER Failed");
1590 return -1;
1591 }
1592
1593 return 0;
1594}
1595
1596static int get_modem_info(struct ATENINTL_port *ATEN2011_port,
1597 unsigned int __user *value)
1598{
1599 unsigned int result = 0;
1600 __u16 msr;
1601 unsigned int mcr = ATEN2011_port->shadowMCR;
1602 int status;
1603
1604 status = get_uart_reg(ATEN2011_port->port, MODEM_STATUS_REGISTER, &msr);
1605 result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) /* 0x002 */
1606 |((mcr & MCR_RTS) ? TIOCM_RTS : 0) /* 0x004 */
1607 |((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) /* 0x020 */
1608 |((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) /* 0x040 */
1609 |((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) /* 0x080 */
1610 |((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0); /* 0x100 */
1611
1612 dbg("%s -- %x", __func__, result);
1613
1614 if (copy_to_user(value, &result, sizeof(int)))
1615 return -EFAULT;
1616 return 0;
1617}
1618
1619static int get_serial_info(struct ATENINTL_port *ATEN2011_port,
1620 struct serial_struct __user *retinfo)
1621{
1622 struct serial_struct tmp;
1623
1624 if (ATEN2011_port == NULL)
1625 return -1;
1626
1627 if (!retinfo)
1628 return -EFAULT;
1629
1630 memset(&tmp, 0, sizeof(tmp));
1631
1632 tmp.type = PORT_16550A;
1633 tmp.line = ATEN2011_port->port->serial->minor;
1634 if (tmp.line == SERIAL_TTY_NO_MINOR)
1635 tmp.line = 0;
1636 tmp.port = ATEN2011_port->port->number;
1637 tmp.irq = 0;
1638 tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
1639 tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
1640 tmp.baud_base = 9600;
1641 tmp.close_delay = 5 * HZ;
1642 tmp.closing_wait = 30 * HZ;
1643
1644 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
1645 return -EFAULT;
1646 return 0;
1647}
1648
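/*
 * Per-port ioctl handler: implements TIOCINQ, TIOCOUTQ, TIOCSERGETLSR,
 * TIOCMBIS/TIOCMBIC/TIOCMSET, TIOCMGET, TIOCGSERIAL, TIOCMIWAIT and
 * TIOCGICOUNT; anything else falls through to -ENOIOCTLCMD.
 */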
1649static int ATEN2011_ioctl(struct tty_struct *tty, struct file *file,
1650 unsigned int cmd, unsigned long arg)
1651{
1652 struct usb_serial_port *port = tty->driver_data;
1653 struct ATENINTL_port *ATEN2011_port;
1654 struct async_icount cnow;
1655 struct async_icount cprev;
1656 struct serial_icounter_struct icount;
1657 int ATENret = 0;
1658 unsigned int __user *user_arg = (unsigned int __user *)arg;
1659
1660 ATEN2011_port = usb_get_serial_port_data(port);
1661
1662 if (ATEN2011_port == NULL)
1663 return -1;
1664
1665 dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);
1666
1667 switch (cmd) {
1668 /* return number of bytes available */
1669
1670 case TIOCINQ:
1671 dbg("%s (%d) TIOCINQ", __func__, port->number);
1672 return get_number_bytes_avail(tty, ATEN2011_port, user_arg);
1673 break;
1674
1675 case TIOCOUTQ:
1676 dbg("%s (%d) TIOCOUTQ", __func__, port->number);
1677 return put_user(ATEN2011_chars_in_buffer(tty), user_arg);
1678 break;
1679
1680 case TIOCSERGETLSR:
1681 dbg("%s (%d) TIOCSERGETLSR", __func__, port->number);
1682 return get_lsr_info(tty, ATEN2011_port, user_arg);
1684
1685 case TIOCMBIS:
1686 case TIOCMBIC:
1687 case TIOCMSET:
1688 dbg("%s (%d) TIOCMSET/TIOCMBIC/TIOCMSET", __func__,
1689 port->number);
1690 ATENret = set_modem_info(ATEN2011_port, cmd, user_arg);
1691 return ATENret;
1692
1693 case TIOCMGET:
1694 dbg("%s (%d) TIOCMGET", __func__, port->number);
1695 return get_modem_info(ATEN2011_port, user_arg);
1696
1697 case TIOCGSERIAL:
1698 dbg("%s (%d) TIOCGSERIAL", __func__, port->number);
1699 return get_serial_info(ATEN2011_port,
1700 (struct serial_struct __user *)arg);
1701
1702 case TIOCSSERIAL:
1703 dbg("%s (%d) TIOCSSERIAL", __func__, port->number);
1704 break;
1705
1706 case TIOCMIWAIT:
1707 dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
1708 cprev = ATEN2011_port->icount;
1709 while (1) {
1710 /* see if a signal did it */
1711 if (signal_pending(current))
1712 return -ERESTARTSYS;
1713 cnow = ATEN2011_port->icount;
1714 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
1715 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
1716 return -EIO; /* no change => error */
1717 if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
1718 ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
1719 ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
1720 ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
1721 return 0;
1722 }
1723 cprev = cnow;
1724 }
1725 /* NOTREACHED */
1726 break;
1727
1728 case TIOCGICOUNT:
1729 cnow = ATEN2011_port->icount;
1730 icount.cts = cnow.cts;
1731 icount.dsr = cnow.dsr;
1732 icount.rng = cnow.rng;
1733 icount.dcd = cnow.dcd;
1734 icount.rx = cnow.rx;
1735 icount.tx = cnow.tx;
1736 icount.frame = cnow.frame;
1737 icount.overrun = cnow.overrun;
1738 icount.parity = cnow.parity;
1739 icount.brk = cnow.brk;
1740 icount.buf_overrun = cnow.buf_overrun;
1741
1742 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
1743 port->number, icount.rx, icount.tx);
1744 if (copy_to_user((void __user *)arg, &icount, sizeof(icount)))
1745 return -EFAULT;
1746 return 0;
1747
1748 default:
1749 break;
1750 }
1751
1752 return -ENOIOCTLCMD;
1753}
1754
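/*
 * Map the requested baud rate onto one of the device's base clocks and
 * compute the divisor: clk_sel_val selects the base rate (115200, 230400,
 * 403200, 460800, 806400, 921600, 1572864 or 3145728) and
 * divisor = base rate / requested baud rate.
 */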
1755static int ATEN2011_calc_baud_rate_divisor(int baudRate, int *divisor,
1756 __u16 *clk_sel_val)
1757{
1758 dbg("%s - %d", __func__, baudRate);
1759
1760 if (baudRate <= 115200) {
1761 *divisor = 115200 / baudRate;
1762 *clk_sel_val = 0x0;
1763 } else if ((baudRate > 115200) && (baudRate <= 230400)) {
1765 *divisor = 230400 / baudRate;
1766 *clk_sel_val = 0x10;
1767 } else if ((baudRate > 230400) && (baudRate <= 403200)) {
1768 *divisor = 403200 / baudRate;
1769 *clk_sel_val = 0x20;
1770 } else if ((baudRate > 403200) && (baudRate <= 460800)) {
1771 *divisor = 460800 / baudRate;
1772 *clk_sel_val = 0x30;
1773 } else if ((baudRate > 460800) && (baudRate <= 806400)) {
1774 *divisor = 806400 / baudRate;
1775 *clk_sel_val = 0x40;
1776 } else if ((baudRate > 806400) && (baudRate <= 921600)) {
1777 *divisor = 921600 / baudRate;
1778 *clk_sel_val = 0x50;
1779 } else if ((baudRate > 921600) && (baudRate <= 1572864)) {
1780 *divisor = 1572864 / baudRate;
1781 *clk_sel_val = 0x60;
1782 } else if ((baudRate > 1572864) && (baudRate <= 3145728)) {
1783 *divisor = 3145728 / baudRate;
1784 *clk_sel_val = 0x70;
1785 }
1786 return 0;
1787}
1788
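/*
 * Program the baud rate: adjust the clock-select bits in the port's
 * special register, then use the standard 16550 sequence - set LCR_DLAB,
 * write the divisor to DLL/DLM and clear LCR_DLAB again.
 */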
1789static int ATEN2011_send_cmd_write_baud_rate(struct ATENINTL_port
1790 *ATEN2011_port, int baudRate)
1791{
1792 int divisor = 0;
1793 int status;
1794 __u16 Data;
1795 unsigned char number;
1796 __u16 clk_sel_val;
1797 struct usb_serial_port *port;
1798 int minor;
1799
1800 if (ATEN2011_port == NULL)
1801 return -1;
1802
1803 port = (struct usb_serial_port *)ATEN2011_port->port;
1804
1805 dbg("%s", "Entering .......... ");
1806
1807 minor = ATEN2011_port->port->serial->minor;
1808 if (minor == SERIAL_TTY_NO_MINOR)
1809 minor = 0;
1810 number = ATEN2011_port->port->number - minor;
1811
1812 dbg("%s - port = %d, baud = %d", __func__,
1813 ATEN2011_port->port->number, baudRate);
1814 /* reset clk_uart_sel in spregOffset */
1815 if (baudRate > 115200) {
1816#ifdef HW_flow_control
1817 /*
1818 * NOTE: need to see the other register to modify
1819 * setting h/w flow control bit to 1;
1820 */
1821 /* Data = ATEN2011_port->shadowMCR; */
1822 Data = 0x2b;
1823 ATEN2011_port->shadowMCR = Data;
1824 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1825 if (status < 0) {
1826 dbg("Writing spreg failed in set_serial_baud");
1827 return -1;
1828 }
1829#endif
1830
1831 } else {
1832#ifdef HW_flow_control
1833 /* setting h/w flow control bit to 0; */
1834 /* Data = ATEN2011_port->shadowMCR; */
1835 Data = 0xb;
1836 ATEN2011_port->shadowMCR = Data;
1837 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1838 if (status < 0) {
1839 dbg("Writing spreg failed in set_serial_baud");
1840 return -1;
1841 }
1842#endif
1843
1844 }
1845
1846 if (1) /* baudRate <= 115200) */ {
1847 clk_sel_val = 0x0;
1848 Data = 0x0;
1849 status =
1850 ATEN2011_calc_baud_rate_divisor(baudRate, &divisor,
1851 &clk_sel_val);
1852 status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data);
1853 if (status < 0) {
1854 dbg("reading spreg failed in set_serial_baud");
1855 return -1;
1856 }
1857 Data = (Data & 0x8f) | clk_sel_val;
1858 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
1859 if (status < 0) {
1860 dbg("Writing spreg failed in set_serial_baud");
1861 return -1;
1862 }
1863 /* Calculate the Divisor */
1864
1865 if (status) {
1866 err("%s - bad baud rate", __func__);
1867 dbg("%s", "bad baud rate");
1868 return status;
1869 }
1870 /* Enable access to divisor latch */
1871 Data = ATEN2011_port->shadowLCR | SERIAL_LCR_DLAB;
1872 ATEN2011_port->shadowLCR = Data;
1873 set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
1874
1875 /* Write the divisor */
1876 Data = (unsigned char)(divisor & 0xff);
1877 dbg("set_serial_baud Value to write DLL is %x", Data);
1878 set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
1879
1880 Data = (unsigned char)((divisor & 0xff00) >> 8);
1881 dbg("set_serial_baud Value to write DLM is %x", Data);
1882 set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
1883
1884 /* Disable access to divisor latch */
1885 Data = ATEN2011_port->shadowLCR & ~SERIAL_LCR_DLAB;
1886 ATEN2011_port->shadowLCR = Data;
1887 set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
1888
1889 }
1890
1891 return status;
1892}
1893
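/*
 * Rebuild the line settings from the termios data: pick word length,
 * parity and stop bits for the LCR, reset the FIFOs, reprogram the MCR
 * and finally set the baud rate via ATEN2011_send_cmd_write_baud_rate().
 */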
1894static void ATEN2011_change_port_settings(struct tty_struct *tty,
1895 struct ATENINTL_port *ATEN2011_port,
1896 struct ktermios *old_termios)
1897{
1898 int baud;
1899 unsigned cflag;
1900 unsigned iflag;
1901 __u8 lData;
1902 __u8 lParity;
1903 __u8 lStop;
1904 int status;
1905 __u16 Data;
1906 struct usb_serial_port *port;
1907 struct usb_serial *serial;
1908
1909 if (ATEN2011_port == NULL)
1910 return;
1911
1912 port = (struct usb_serial_port *)ATEN2011_port->port;
1913
1914 serial = port->serial;
1915
1916 dbg("%s - port %d", __func__, ATEN2011_port->port->number);
1917
1918 if (!ATEN2011_port->open) {
1919 dbg("%s - port not opened", __func__);
1920 return;
1921 }
1922
1923 if ((!tty) || (!tty->termios)) {
1924 dbg("%s - no tty structures", __func__);
1925 return;
1926 }
1927
1928 dbg("%s", "Entering .......... ");
1929
1930 lData = LCR_BITS_8;
1931 lStop = LCR_STOP_1;
1932 lParity = LCR_PAR_NONE;
1933
1934 cflag = tty->termios->c_cflag;
1935 iflag = tty->termios->c_iflag;
1936
1937 /* Change the number of bits */
1938
1939 /* COMMENT1: the line "if (cflag & CSIZE)" below was added because of the errors we get in the serial loopback data test, i.e. serial_loopback.pl -v */
1940 /* if(cflag & CSIZE) */
1941 {
1942 switch (cflag & CSIZE) {
1943 case CS5:
1944 lData = LCR_BITS_5;
1945 break;
1946
1947 case CS6:
1948 lData = LCR_BITS_6;
1949 break;
1950
1951 case CS7:
1952 lData = LCR_BITS_7;
1953 break;
1954 default:
1955 case CS8:
1956 lData = LCR_BITS_8;
1957 break;
1958 }
1959 }
1960 /* Change the Parity bit */
1961 if (cflag & PARENB) {
1962 if (cflag & PARODD) {
1963 lParity = LCR_PAR_ODD;
1964 dbg("%s - parity = odd", __func__);
1965 } else {
1966 lParity = LCR_PAR_EVEN;
1967 dbg("%s - parity = even", __func__);
1968 }
1969
1970 } else {
1971 dbg("%s - parity = none", __func__);
1972 }
1973
1974 if (cflag & CMSPAR)
1975 lParity = lParity | 0x20;
1976
1977 /* Change the Stop bit */
1978 if (cflag & CSTOPB) {
1979 lStop = LCR_STOP_2;
1980 dbg("%s - stop bits = 2", __func__);
1981 } else {
1982 lStop = LCR_STOP_1;
1983 dbg("%s - stop bits = 1", __func__);
1984 }
1985
1986 /* Update the LCR with the correct value */
1987 ATEN2011_port->shadowLCR &=
1988 ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
1989 ATEN2011_port->shadowLCR |= (lData | lParity | lStop);
1990
1991 dbg("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is %x",
1992 ATEN2011_port->shadowLCR);
1994 /* Disable Interrupts */
1995 Data = 0x00;
1996 set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
1997
1998 Data = 0x00;
1999 set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
2000
2001 Data = 0xcf;
2002 set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
2003
2004 /* Send the updated LCR value to the ATEN2011 */
2005 Data = ATEN2011_port->shadowLCR;
2006
2007 set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
2008
2009 Data = 0x00b;
2010 ATEN2011_port->shadowMCR = Data;
2011 set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
2012 Data = 0x00b;
2013 set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
2014
2015 /* set up the MCR register and send it to the ATEN2011 */
2016
2017 ATEN2011_port->shadowMCR = MCR_MASTER_IE;
2018 if (cflag & CBAUD)
2019 ATEN2011_port->shadowMCR |= (MCR_DTR | MCR_RTS);
2020
2021 if (cflag & CRTSCTS)
2022 ATEN2011_port->shadowMCR |= (MCR_XON_ANY);
2023 else
2024 ATEN2011_port->shadowMCR &= ~(MCR_XON_ANY);
2025
2026 Data = ATEN2011_port->shadowMCR;
2027 set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
2028
2029 /* Determine divisor based on baud rate */
2030 baud = tty_get_baud_rate(tty);
2031
2032 if (!baud) {
2033 /* pick a default, any default... */
2034 dbg("%s", "Picked default baud...");
2035 baud = 9600;
2036 }
2037
2038 dbg("%s - baud rate = %d", __func__, baud);
2039 status = ATEN2011_send_cmd_write_baud_rate(ATEN2011_port, baud);
2040
2041 /* Enable Interrupts */
2042 Data = 0x0c;
2043 set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
2044
2045 if (ATEN2011_port->read_urb->status != -EINPROGRESS) {
2046 ATEN2011_port->read_urb->dev = serial->dev;
2047
2048 status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC);
2049
2050 if (status) {
2051 dbg(" usb_submit_urb(read bulk) failed, status = %d",
2052 status);
2054 }
2055 }
2056 dbg("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is End %x",
2057 ATEN2011_port->shadowLCR);
2059
2060 return;
2061}
2062
2063static int ATEN2011_calc_num_ports(struct usb_serial *serial)
2064{
2065
2066 __u16 Data = 0x00;
2067 int ret = 0;
2068 int ATEN2011_2or4ports;
2069 ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
2070 ATEN_RDREQ, ATEN_RD_RTYPE, 0, GPIO_REGISTER,
2071 &Data, VENDOR_READ_LENGTH, ATEN_WDR_TIMEOUT);
2072
/* ghostgum: here is where the problem appears to be */
2074/* Which of the following are needed? */
2075/* Greg used the serial->type->num_ports=2 */
2076/* But the code in the ATEN2011_open relies on serial->num_ports=2 */
2077 if ((Data & 0x01) == 0) {
2078 ATEN2011_2or4ports = 2;
2079 serial->type->num_ports = 2;
2080 serial->num_ports = 2;
2081 }
2082 /* else if(serial->interface->cur_altsetting->desc.bNumEndpoints == 9) */
2083 else {
2084 ATEN2011_2or4ports = 4;
2085 serial->type->num_ports = 4;
2086 serial->num_ports = 4;
2087
2088 }
2089
2090 return ATEN2011_2or4ports;
2091}
2092
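/*
 * attach() callback: allocate the adapter and per-port private data,
 * work out whether this is a 2- or 4-port device, then program the
 * per-port control, DCR, clock and zero-length-packet registers.
 */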
2093static int ATEN2011_startup(struct usb_serial *serial)
2094{
2095 struct ATENINTL_serial *ATEN2011_serial;
2096 struct ATENINTL_port *ATEN2011_port;
2097 struct usb_device *dev;
2098 int i, status;
2099 int minor;
2100
2101 __u16 Data;
2102 dbg("%s", " ATEN2011_startup :entering..........");
2103
2104 if (!serial) {
2105 dbg("%s", "Invalid Handler");
2106 return -1;
2107 }
2108
2109 dev = serial->dev;
2110
2111 dbg("%s", "Entering...");
2112
2113 /* create our private serial structure */
2114 ATEN2011_serial = kzalloc(sizeof(struct ATENINTL_serial), GFP_KERNEL);
2115 if (ATEN2011_serial == NULL) {
2116 err("%s - Out of memory", __func__);
2117 return -ENOMEM;
2118 }
2119
2120 /* resetting the private structure field values to zero */
2121 memset(ATEN2011_serial, 0, sizeof(struct ATENINTL_serial));
2122
2123 ATEN2011_serial->serial = serial;
2124 /* initialize status polling flag to 0 */
2125 ATEN2011_serial->status_polling_started = 0;
2126
2127 usb_set_serial_data(serial, ATEN2011_serial);
2128 ATEN2011_serial->ATEN2011_spectrum_2or4ports =
2129 ATEN2011_calc_num_ports(serial);
2130 /* we set up the pointers to the endpoints in the ATEN2011_open *
2131 * function, as the structures aren't created yet. */
2132
2133 /* set up port private structures */
2134 for (i = 0; i < serial->num_ports; ++i) {
2135 ATEN2011_port =
2136 kmalloc(sizeof(struct ATENINTL_port), GFP_KERNEL);
2137 if (ATEN2011_port == NULL) {
2138 err("%s - Out of memory", __func__);
2139 usb_set_serial_data(serial, NULL);
2140 kfree(ATEN2011_serial);
2141 return -ENOMEM;
2142 }
2143 memset(ATEN2011_port, 0, sizeof(struct ATENINTL_port));
2144
2145 /*
2146 * Initialize each port's interrupt endpoint to the port 0
2147 * interrupt endpoint. Our device has only one interrupt endpoint
2148 * common to all ports
2149 */
2150 /* serial->port[i]->interrupt_in_endpointAddress = serial->port[0]->interrupt_in_endpointAddress; */
2151
2152 ATEN2011_port->port = serial->port[i];
2153 usb_set_serial_port_data(serial->port[i], ATEN2011_port);
2154
2155 minor = serial->port[i]->serial->minor;
2156 if (minor == SERIAL_TTY_NO_MINOR)
2157 minor = 0;
2158 ATEN2011_port->port_num =
2159 ((serial->port[i]->number - minor) + 1);
2160
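/*
 * Pick the special, control and DCR register offsets for this port;
 * the layout differs between the 2-port and 4-port variants.
 */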
2161 if (ATEN2011_port->port_num == 1) {
2162 ATEN2011_port->SpRegOffset = 0x0;
2163 ATEN2011_port->ControlRegOffset = 0x1;
2164 ATEN2011_port->DcrRegOffset = 0x4;
2165 } else if ((ATEN2011_port->port_num == 2)
2166 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2167 4)) {
2168 ATEN2011_port->SpRegOffset = 0x8;
2169 ATEN2011_port->ControlRegOffset = 0x9;
2170 ATEN2011_port->DcrRegOffset = 0x16;
2171 } else if ((ATEN2011_port->port_num == 2)
2172 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2173 2)) {
2174 ATEN2011_port->SpRegOffset = 0xa;
2175 ATEN2011_port->ControlRegOffset = 0xb;
2176 ATEN2011_port->DcrRegOffset = 0x19;
2177 } else if ((ATEN2011_port->port_num == 3)
2178 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2179 4)) {
2180 ATEN2011_port->SpRegOffset = 0xa;
2181 ATEN2011_port->ControlRegOffset = 0xb;
2182 ATEN2011_port->DcrRegOffset = 0x19;
2183 } else if ((ATEN2011_port->port_num == 4)
2184 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2185 4)) {
2186 ATEN2011_port->SpRegOffset = 0xc;
2187 ATEN2011_port->ControlRegOffset = 0xd;
2188 ATEN2011_port->DcrRegOffset = 0x1c;
2189 }
2190
2191 usb_set_serial_port_data(serial->port[i], ATEN2011_port);
2192
2193 /* enable rx_disable bit in control register */
2194
2195 status = get_reg_sync(serial->port[i],
2196 ATEN2011_port->ControlRegOffset, &Data);
2197 if (status < 0) {
2198 dbg("Reading ControlReg failed status-0x%x",
2199 status);
2200 break;
2201 } else
2202 dbg("ControlReg Reading success val is %x, status%d",
2203 Data, status);
2205 Data |= 0x08; /* setting driver done bit */
2206 Data |= 0x04; /* sp1_bit to have cts change reflected in modem status reg */
2207
2208 /* Data |= 0x20; */ /* rx_disable bit */
2209 status = set_reg_sync(serial->port[i],
2210 ATEN2011_port->ControlRegOffset, Data);
2211 if (status < 0) {
2212 dbg("Writing ControlReg failed(rx_disable) status-0x%x",
2213 status);
2215 break;
2216 } else
2217 dbg("ControlReg Writing success(rx_disable) status%d",
2218 status);
2220
2221 /*
2222 * Write default values in DCR (i.e. 0x01 in DCR0, 0x05 in DCR1
2223 * and 0x24 in DCR2)
2224 */
2225 Data = 0x01;
2226 status = set_reg_sync(serial->port[i],
2227 (__u16)(ATEN2011_port->DcrRegOffset + 0),
2228 Data);
2229 if (status < 0) {
2230 dbg("Writing DCR0 failed status-0x%x", status);
2231 break;
2232 } else
2233 dbg("DCR0 Writing success status%d", status);
2234
2235 Data = 0x05;
2236 status = set_reg_sync(serial->port[i],
2237 (__u16)(ATEN2011_port->DcrRegOffset + 1),
2238 Data);
2239 if (status < 0) {
2240 dbg("Writing DCR1 failed status-0x%x", status);
2241 break;
2242 } else
2243 dbg("DCR1 Writing success status%d", status);
2244
2245 Data = 0x24;
2246 status = set_reg_sync(serial->port[i],
2247 (__u16)(ATEN2011_port->DcrRegOffset + 2),
2248 Data);
2249 if (status < 0) {
2250 dbg("Writing DCR2 failed status-0x%x", status);
2251 break;
2252 } else
2253 dbg("DCR2 Writing success status%d", status);
2254
2255 /* write 0x0 to clkstart and 0x20 to clkmulti */
2256 Data = 0x0;
2257 status = set_reg_sync(serial->port[i], CLK_START_VALUE_REGISTER,
2258 Data);
2259 if (status < 0) {
2260 dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x",
2261 status);
2263 break;
2264 } else
2265 dbg("CLK_START_VALUE_REGISTER Writing success status%d",
2266 status);
2268
2269 Data = 0x20;
2270 status = set_reg_sync(serial->port[i], CLK_MULTI_REGISTER,
2271 Data);
2272 if (status < 0) {
2273 dbg("Writing CLK_MULTI_REGISTER failed status-0x%x",
2274 status);
2276 break;
2277 } else
2278 dbg("CLK_MULTI_REGISTER Writing success status%d",
2279 status);
2280
2281 /* Zero Length flag register */
2282 if ((ATEN2011_port->port_num != 1)
2283 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)) {
2284
2285 Data = 0xff;
2286 status = set_reg_sync(serial->port[i],
2287 (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num)),
2288 Data);
2289 dbg("ZLP offset %x",
2290 (__u16) (ZLP_REG1 +
2291 ((__u16) ATEN2011_port->port_num)));
2292 if (status < 0) {
2293 dbg("Writing ZLP_REG%d failed status-0x%x",
2294 i + 2, status);
2296 break;
2297 } else
2298 dbg("ZLP_REG%d Writing success status%d",
2299 i + 2, status);
2300 } else {
2301 Data = 0xff;
2302 status = set_reg_sync(serial->port[i],
2303 (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num) - 0x1),
2304 Data);
2305 dbg("ZLP offset %x",
2306 (__u16) (ZLP_REG1 +
2307 ((__u16) ATEN2011_port->port_num) -
2308 0x1));
2309 if (status < 0) {
2310 dbg("Writing ZLP_REG%d failed status-0x%x",
2311 i + 1, status);
2313 break;
2314 } else
2315 dbg("ZLP_REG%d Writing success status%d",
2316 i + 1, status);
2317
2318 }
2319 ATEN2011_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC);
2320 ATEN2011_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
2321
2322 }
2323
2324 /* Zero Length flag enable */
2325 Data = 0x0f;
2326 status = set_reg_sync(serial->port[0], ZLP_REG5, Data);
2327 if (status < 0) {
2328 dbg("Writing ZLP_REG5 failed status-0x%x", status);
2329 return -1;
2330 } else
2331 dbg("ZLP_REG5 Writing success status%d", status);
2332
2333 /* setting configuration feature to one */
2334 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2335 (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ);
2336 return 0;
2337}
2338
2339static void ATEN2011_release(struct usb_serial *serial)
2340{
2341 int i;
2342 struct ATENINTL_port *ATEN2011_port;
2343
2344 /* check for the ports to be closed, close the ports and disconnect */
2345
2346 /* free private structure allocated for serial port *
2347 * stop reads and writes on all ports */
2348
2349 for (i = 0; i < serial->num_ports; ++i) {
2350 ATEN2011_port = usb_get_serial_port_data(serial->port[i]);
2351 kfree(ATEN2011_port->ctrl_buf);
2352 usb_kill_urb(ATEN2011_port->control_urb);
2353 kfree(ATEN2011_port);
2354 usb_set_serial_port_data(serial->port[i], NULL);
2355 }
2356
2357 /* free private structure allocated for serial device */
2358
2359 kfree(usb_get_serial_data(serial));
2360 usb_set_serial_data(serial, NULL);
2361}
2362
2363static struct usb_serial_driver aten_serial_driver = {
2364 .driver = {
2365 .owner = THIS_MODULE,
2366 .name = "aten2011",
2367 },
2368 .description = DRIVER_DESC,
2369 .id_table = id_table,
2370 .open = ATEN2011_open,
2371 .close = ATEN2011_close,
2372 .write = ATEN2011_write,
2373 .write_room = ATEN2011_write_room,
2374 .chars_in_buffer = ATEN2011_chars_in_buffer,
2375 .throttle = ATEN2011_throttle,
2376 .unthrottle = ATEN2011_unthrottle,
2377 .calc_num_ports = ATEN2011_calc_num_ports,
2378
2379 .ioctl = ATEN2011_ioctl,
2380 .set_termios = ATEN2011_set_termios,
2381 .break_ctl = ATEN2011_break,
2382 .tiocmget = ATEN2011_tiocmget,
2383 .tiocmset = ATEN2011_tiocmset,
2384 .attach = ATEN2011_startup,
2385 .release = ATEN2011_release,
2386 .read_bulk_callback = ATEN2011_bulk_in_callback,
2387 .read_int_callback = ATEN2011_interrupt_callback,
2388};
2389
2390static struct usb_driver aten_driver = {
2391 .name = "aten2011",
2392 .probe = usb_serial_probe,
2393 .disconnect = usb_serial_disconnect,
2394 .id_table = id_table,
2395};
2396
2397static int __init aten_init(void)
2398{
2399 int retval;
2400
2401 /* Register with the usb serial */
2402 retval = usb_serial_register(&aten_serial_driver);
2403 if (retval)
2404 return retval;
2405
2406 printk(KERN_INFO KBUILD_MODNAME ":"
2407 DRIVER_DESC " " DRIVER_VERSION "\n");
2408
2409 /* Register with the usb */
2410 retval = usb_register(&aten_driver);
2411 if (retval)
2412 usb_serial_deregister(&aten_serial_driver);
2413
2414 return retval;
2415}
2416
2417static void __exit aten_exit(void)
2418{
2419 usb_deregister(&aten_driver);
2420 usb_serial_deregister(&aten_serial_driver);
2421}
2422
2423module_init(aten_init);
2424module_exit(aten_exit);
2425
2426/* Module information */
2427MODULE_DESCRIPTION(DRIVER_DESC);
2428MODULE_LICENSE("GPL");
2429
2430MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 0ab9d15f3439..f5416af1e902 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -21,6 +21,7 @@
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/fb.h> 22#include <linux/fb.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/vmalloc.h>
24 25
25#include "udlfb.h" 26#include "udlfb.h"
26 27
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 22f93dd0ba03..251220dc8851 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/smp_lock.h>
21#include <linux/file.h> 22#include <linux/file.h>
22#include <linux/tcp.h> 23#include <linux/tcp.h>
23#include <linux/in.h> 24#include <linux/in.h>
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index a10ed27acbc2..f43ca416e4a8 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -344,7 +344,7 @@ static CHIP_INFO chip_info_table[]= {
344}; 344};
345 345
346static struct pci_device_id device_id_table[] __devinitdata = { 346static struct pci_device_id device_id_table[] __devinitdata = {
347{ 0x1106, 0x3253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (int)&chip_info_table[0]}, 347{ 0x1106, 0x3253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long)&chip_info_table[0]},
348{ 0, } 348{ 0, }
349}; 349};
350#endif 350#endif
@@ -369,7 +369,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
369 369
370#ifdef CONFIG_PM 370#ifdef CONFIG_PM
371static int device_notify_reboot(struct notifier_block *, unsigned long event, void *ptr); 371static int device_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
372static int viawget_suspend(struct pci_dev *pcid, u32 state); 372static int viawget_suspend(struct pci_dev *pcid, pm_message_t state);
373static int viawget_resume(struct pci_dev *pcid); 373static int viawget_resume(struct pci_dev *pcid);
374struct notifier_block device_notifier = { 374struct notifier_block device_notifier = {
375 notifier_call: device_notify_reboot, 375 notifier_call: device_notify_reboot,
@@ -3941,7 +3941,7 @@ device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
3941 while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) { 3941 while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
3942 if(pci_dev_driver(pdev) == &device_driver) { 3942 if(pci_dev_driver(pdev) == &device_driver) {
3943 if (pci_get_drvdata(pdev)) 3943 if (pci_get_drvdata(pdev))
3944 viawget_suspend(pdev, 3); 3944 viawget_suspend(pdev, PMSG_HIBERNATE);
3945 } 3945 }
3946 } 3946 }
3947 } 3947 }
@@ -3949,7 +3949,7 @@ device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
3949} 3949}
3950 3950
3951static int 3951static int
3952viawget_suspend(struct pci_dev *pcid, u32 state) 3952viawget_suspend(struct pci_dev *pcid, pm_message_t state)
3953{ 3953{
3954 int power_status; // to silence the compiler 3954 int power_status; // to silence the compiler
3955 3955
@@ -3971,7 +3971,7 @@ viawget_suspend(struct pci_dev *pcid, u32 state)
3971 memset(pMgmt->abyCurrBSSID, 0, 6); 3971 memset(pMgmt->abyCurrBSSID, 0, 6);
3972 pMgmt->eCurrState = WMAC_STATE_IDLE; 3972 pMgmt->eCurrState = WMAC_STATE_IDLE;
3973 pci_disable_device(pcid); 3973 pci_disable_device(pcid);
3974 power_status = pci_set_power_state(pcid, state); 3974 power_status = pci_set_power_state(pcid, pci_choose_state(pcid, state));
3975 spin_unlock_irq(&pDevice->lock); 3975 spin_unlock_irq(&pDevice->lock);
3976 return 0; 3976 return 0;
3977} 3977}
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index a913efc69669..40de151f2789 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -257,6 +257,7 @@
257#include <linux/fs.h> /* everything... */ 257#include <linux/fs.h> /* everything... */
258#include <linux/errno.h> /* error codes */ 258#include <linux/errno.h> /* error codes */
259#include <linux/slab.h> 259#include <linux/slab.h>
260#include <linux/smp_lock.h>
260#include <linux/mm.h> 261#include <linux/mm.h>
261#include <linux/ioport.h> 262#include <linux/ioport.h>
262#include <linux/interrupt.h> 263#include <linux/interrupt.h>
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index b52cc830c0b4..f3873f650bb4 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -23,7 +23,6 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/phonedev.h> 24#include <linux/phonedev.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/smp_lock.h>
27#include <asm/uaccess.h> 26#include <asm/uaccess.h>
28#include <asm/system.h> 27#include <asm/system.h>
29 28
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 38bfdb0f6660..2bfc41ece0e1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -462,11 +462,18 @@ urbs:
462 462
463 rcv->buffer = buf; 463 rcv->buffer = buf;
464 464
465 usb_fill_bulk_urb(rcv->urb, acm->dev, 465 if (acm->is_int_ep)
466 acm->rx_endpoint, 466 usb_fill_int_urb(rcv->urb, acm->dev,
467 buf->base, 467 acm->rx_endpoint,
468 acm->readsize, 468 buf->base,
469 acm_read_bulk, rcv); 469 acm->readsize,
470 acm_read_bulk, rcv, acm->bInterval);
471 else
472 usb_fill_bulk_urb(rcv->urb, acm->dev,
473 acm->rx_endpoint,
474 buf->base,
475 acm->readsize,
476 acm_read_bulk, rcv);
470 rcv->urb->transfer_dma = buf->dma; 477 rcv->urb->transfer_dma = buf->dma;
471 rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 478 rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
472 479
@@ -550,7 +557,7 @@ static void acm_waker(struct work_struct *waker)
550static int acm_tty_open(struct tty_struct *tty, struct file *filp) 557static int acm_tty_open(struct tty_struct *tty, struct file *filp)
551{ 558{
552 struct acm *acm; 559 struct acm *acm;
553 int rv = -EINVAL; 560 int rv = -ENODEV;
554 int i; 561 int i;
555 dbg("Entering acm_tty_open."); 562 dbg("Entering acm_tty_open.");
556 563
@@ -677,7 +684,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
677 684
678 /* Perform the closing process and see if we need to do the hardware 685 /* Perform the closing process and see if we need to do the hardware
679 shutdown */ 686 shutdown */
680 if (tty_port_close_start(&acm->port, tty, filp) == 0) 687 if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0)
681 return; 688 return;
682 acm_port_down(acm, 0); 689 acm_port_down(acm, 0);
683 tty_port_close_end(&acm->port, tty); 690 tty_port_close_end(&acm->port, tty);
@@ -740,7 +747,7 @@ static int acm_tty_chars_in_buffer(struct tty_struct *tty)
740{ 747{
741 struct acm *acm = tty->driver_data; 748 struct acm *acm = tty->driver_data;
742 if (!ACM_READY(acm)) 749 if (!ACM_READY(acm))
743 return -EINVAL; 750 return 0;
744 /* 751 /*
745 * This is inaccurate (overcounts), but it works. 752 * This is inaccurate (overcounts), but it works.
746 */ 753 */
@@ -1173,6 +1180,9 @@ made_compressed_probe:
1173 spin_lock_init(&acm->read_lock); 1180 spin_lock_init(&acm->read_lock);
1174 mutex_init(&acm->mutex); 1181 mutex_init(&acm->mutex);
1175 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); 1182 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
1183 acm->is_int_ep = usb_endpoint_xfer_int(epread);
1184 if (acm->is_int_ep)
1185 acm->bInterval = epread->bInterval;
1176 tty_port_init(&acm->port); 1186 tty_port_init(&acm->port);
1177 acm->port.ops = &acm_port_ops; 1187 acm->port.ops = &acm_port_ops;
1178 1188
@@ -1227,9 +1237,14 @@ made_compressed_probe:
1227 goto alloc_fail7; 1237 goto alloc_fail7;
1228 } 1238 }
1229 1239
1230 usb_fill_bulk_urb(snd->urb, usb_dev, 1240 if (usb_endpoint_xfer_int(epwrite))
1231 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), 1241 usb_fill_int_urb(snd->urb, usb_dev,
1232 NULL, acm->writesize, acm_write_bulk, snd); 1242 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
1243 NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
1244 else
1245 usb_fill_bulk_urb(snd->urb, usb_dev,
1246 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
1247 NULL, acm->writesize, acm_write_bulk, snd);
1233 snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1248 snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1234 snd->instance = acm; 1249 snd->instance = acm;
1235 } 1250 }
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1602324808ba..c4a0ee8ffccf 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -126,6 +126,8 @@ struct acm {
126 unsigned int ctrl_caps; /* control capabilities from the class specific header */ 126 unsigned int ctrl_caps; /* control capabilities from the class specific header */
127 unsigned int susp_count; /* number of suspended interfaces */ 127 unsigned int susp_count; /* number of suspended interfaces */
128 int combined_interfaces:1; /* control and data collapsed */ 128 int combined_interfaces:1; /* control and data collapsed */
129 int is_int_ep:1; /* interrupt endpoints contrary to spec used */
130 u8 bInterval;
129 struct acm_wb *delayed_wb; /* write queued for a device about to be woken */ 131 struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
130}; 132};
131 133
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 0fe434505ac4..ba589d4ca8bc 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -15,7 +15,6 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/smp_lock.h>
19#include <linux/mutex.h> 18#include <linux/mutex.h>
20#include <linux/uaccess.h> 19#include <linux/uaccess.h>
21#include <linux/bitops.h> 20#include <linux/bitops.h>
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 3703789d0d2a..b09a527f7341 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -751,7 +751,7 @@ static int get_capabilities(struct usbtmc_device_data *data)
751{ 751{
752 struct device *dev = &data->usb_dev->dev; 752 struct device *dev = &data->usb_dev->dev;
753 char *buffer; 753 char *buffer;
754 int rv; 754 int rv = 0;
755 755
756 buffer = kmalloc(0x18, GFP_KERNEL); 756 buffer = kmalloc(0x18, GFP_KERNEL);
757 if (!buffer) 757 if (!buffer)
@@ -763,7 +763,7 @@ static int get_capabilities(struct usbtmc_device_data *data)
763 0, 0, buffer, 0x18, USBTMC_TIMEOUT); 763 0, 0, buffer, 0x18, USBTMC_TIMEOUT);
764 if (rv < 0) { 764 if (rv < 0) {
765 dev_err(dev, "usb_control_msg returned %d\n", rv); 765 dev_err(dev, "usb_control_msg returned %d\n", rv);
766 return rv; 766 goto err_out;
767 } 767 }
768 768
769 dev_dbg(dev, "GET_CAPABILITIES returned %x\n", buffer[0]); 769 dev_dbg(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
@@ -773,7 +773,8 @@ static int get_capabilities(struct usbtmc_device_data *data)
773 dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]); 773 dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]);
774 if (buffer[0] != USBTMC_STATUS_SUCCESS) { 774 if (buffer[0] != USBTMC_STATUS_SUCCESS) {
775 dev_err(dev, "GET_CAPABILITIES returned %x\n", buffer[0]); 775 dev_err(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
776 return -EPERM; 776 rv = -EPERM;
777 goto err_out;
777 } 778 }
778 779
779 data->capabilities.interface_capabilities = buffer[4]; 780 data->capabilities.interface_capabilities = buffer[4];
@@ -781,8 +782,9 @@ static int get_capabilities(struct usbtmc_device_data *data)
781 data->capabilities.usb488_interface_capabilities = buffer[14]; 782 data->capabilities.usb488_interface_capabilities = buffer[14];
782 data->capabilities.usb488_device_capabilities = buffer[15]; 783 data->capabilities.usb488_device_capabilities = buffer[15];
783 784
785err_out:
784 kfree(buffer); 786 kfree(buffer);
785 return 0; 787 return rv;
786} 788}
787 789
788#define capability_attribute(name) \ 790#define capability_attribute(name) \
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 69280c35b5cb..ad925946f869 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -28,7 +28,7 @@ comment "Miscellaneous USB options"
28 depends on USB 28 depends on USB
29 29
30config USB_DEVICEFS 30config USB_DEVICEFS
31 bool "USB device filesystem (DEPRECATED)" if EMBEDDED 31 bool "USB device filesystem (DEPRECATED)"
32 depends on USB 32 depends on USB
33 ---help--- 33 ---help---
34 If you say Y here (and to "/proc file system support" in the "File 34 If you say Y here (and to "/proc file system support" in the "File
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 24dfb33f90cb..a16c538d0132 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
80 int max_tx; 80 int max_tx;
81 int i; 81 int i;
82 82
83 /* Allocate space for the SS endpoint companion descriptor */
84 ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
85 GFP_KERNEL);
86 if (!ep->ss_ep_comp)
87 return -ENOMEM;
88 desc = (struct usb_ss_ep_comp_descriptor *) buffer; 83 desc = (struct usb_ss_ep_comp_descriptor *) buffer;
89 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { 84 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
90 dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " 85 dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
91 " interface %d altsetting %d ep %d: " 86 " interface %d altsetting %d ep %d: "
92 "using minimum values\n", 87 "using minimum values\n",
93 cfgno, inum, asnum, ep->desc.bEndpointAddress); 88 cfgno, inum, asnum, ep->desc.bEndpointAddress);
94 ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
95 ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
96 ep->ss_ep_comp->desc.bMaxBurst = 0;
97 /*
98 * Leave bmAttributes as zero, which will mean no streams for
99 * bulk, and isoc won't support multiple bursts of packets.
100 * With bursts of only one packet, and a Mult of 1, the max
101 * amount of data moved per endpoint service interval is one
102 * packet.
103 */
104 if (usb_endpoint_xfer_isoc(&ep->desc) ||
105 usb_endpoint_xfer_int(&ep->desc))
106 ep->ss_ep_comp->desc.wBytesPerInterval =
107 ep->desc.wMaxPacketSize;
108 /* 89 /*
109 * The next descriptor is for an Endpoint or Interface, 90 * The next descriptor is for an Endpoint or Interface,
110 * no extra descriptors to copy into the companion structure, 91 * no extra descriptors to copy into the companion structure,
111 * and we didn't eat up any of the buffer. 92 * and we didn't eat up any of the buffer.
112 */ 93 */
113 retval = 0; 94 return 0;
114 goto valid;
115 } 95 }
116 memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE); 96 memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
117 desc = &ep->ss_ep_comp->desc; 97 desc = &ep->ss_ep_comp->desc;
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
320 buffer += i; 300 buffer += i;
321 size -= i; 301 size -= i;
322 302
303 /* Allocate space for the SS endpoint companion descriptor */
304 endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
305 GFP_KERNEL);
306 if (!endpoint->ss_ep_comp)
307 return -ENOMEM;
308
309 /* Fill in some default values (may be overwritten later) */
310 endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
311 endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
312 endpoint->ss_ep_comp->desc.bMaxBurst = 0;
313 /*
314 * Leave bmAttributes as zero, which will mean no streams for
315 * bulk, and isoc won't support multiple bursts of packets.
316 * With bursts of only one packet, and a Mult of 1, the max
317 * amount of data moved per endpoint service interval is one
318 * packet.
319 */
320 if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
321 usb_endpoint_xfer_int(&endpoint->desc))
322 endpoint->ss_ep_comp->desc.wBytesPerInterval =
323 endpoint->desc.wMaxPacketSize;
324
323 if (size > 0) { 325 if (size > 0) {
324 retval = usb_parse_ss_endpoint_companion(ddev, cfgno, 326 retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
325 inum, asnum, endpoint, num_ep, buffer, 327 inum, asnum, endpoint, num_ep, buffer,
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
329 retval = buffer - buffer0; 331 retval = buffer - buffer0;
330 } 332 }
331 } else { 333 } else {
334 dev_warn(ddev, "config %d interface %d altsetting %d "
335 "endpoint 0x%X has no "
336 "SuperSpeed companion descriptor\n",
337 cfgno, inum, asnum, d->bEndpointAddress);
332 retval = buffer - buffer0; 338 retval = buffer - buffer0;
333 } 339 }
334 } else { 340 } else {
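The config.c rework above moves the SuperSpeed endpoint-companion defaults out of the parse routine: usb_parse_endpoint() now allocates ep->ss_ep_comp up front and fills it with minimum values, so usb_parse_ss_endpoint_companion() can simply return 0 when the next descriptor is not a companion. A rough sketch of those defaults (illustrative only; the helper name is made up, types come from <linux/usb.h> and <linux/usb/ch9.h>):

	#include <linux/usb.h>
	#include <linux/usb/ch9.h>

	static void fill_minimum_ss_comp(struct usb_ss_ep_comp_descriptor *comp,
					 const struct usb_endpoint_descriptor *ep)
	{
		comp->bLength = USB_DT_SS_EP_COMP_SIZE;
		comp->bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
		comp->bMaxBurst = 0;		/* one packet per burst */
		comp->bmAttributes = 0;		/* no streams for bulk, Mult = 1 for isoc */
		/* isoc/interrupt endpoints then move one packet per service interval */
		if (usb_endpoint_xfer_isoc(ep) || usb_endpoint_xfer_int(ep))
			comp->wBytesPerInterval = ep->wMaxPacketSize;
	}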
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 73c108d117b4..96f11715cd26 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -136,17 +136,19 @@ static const struct class_info clas_info[] =
136 {USB_CLASS_AUDIO, "audio"}, 136 {USB_CLASS_AUDIO, "audio"},
137 {USB_CLASS_COMM, "comm."}, 137 {USB_CLASS_COMM, "comm."},
138 {USB_CLASS_HID, "HID"}, 138 {USB_CLASS_HID, "HID"},
139 {USB_CLASS_HUB, "hub"},
140 {USB_CLASS_PHYSICAL, "PID"}, 139 {USB_CLASS_PHYSICAL, "PID"},
140 {USB_CLASS_STILL_IMAGE, "still"},
141 {USB_CLASS_PRINTER, "print"}, 141 {USB_CLASS_PRINTER, "print"},
142 {USB_CLASS_MASS_STORAGE, "stor."}, 142 {USB_CLASS_MASS_STORAGE, "stor."},
143 {USB_CLASS_HUB, "hub"},
143 {USB_CLASS_CDC_DATA, "data"}, 144 {USB_CLASS_CDC_DATA, "data"},
144 {USB_CLASS_APP_SPEC, "app."},
145 {USB_CLASS_VENDOR_SPEC, "vend."},
146 {USB_CLASS_STILL_IMAGE, "still"},
147 {USB_CLASS_CSCID, "scard"}, 145 {USB_CLASS_CSCID, "scard"},
148 {USB_CLASS_CONTENT_SEC, "c-sec"}, 146 {USB_CLASS_CONTENT_SEC, "c-sec"},
149 {USB_CLASS_VIDEO, "video"}, 147 {USB_CLASS_VIDEO, "video"},
148 {USB_CLASS_WIRELESS_CONTROLLER, "wlcon"},
149 {USB_CLASS_MISC, "misc"},
150 {USB_CLASS_APP_SPEC, "app."},
151 {USB_CLASS_VENDOR_SPEC, "vend."},
150 {-1, "unk."} /* leave as last */ 152 {-1, "unk."} /* leave as last */
151}; 153};
152 154
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 308609039c73..4247eccf858c 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -325,21 +325,34 @@ static void async_completed(struct urb *urb)
325 struct async *as = urb->context; 325 struct async *as = urb->context;
326 struct dev_state *ps = as->ps; 326 struct dev_state *ps = as->ps;
327 struct siginfo sinfo; 327 struct siginfo sinfo;
328 struct pid *pid = NULL;
329 uid_t uid = 0;
330 uid_t euid = 0;
331 u32 secid = 0;
332 int signr;
328 333
329 spin_lock(&ps->lock); 334 spin_lock(&ps->lock);
330 list_move_tail(&as->asynclist, &ps->async_completed); 335 list_move_tail(&as->asynclist, &ps->async_completed);
331 spin_unlock(&ps->lock);
332 as->status = urb->status; 336 as->status = urb->status;
333 if (as->signr) { 337 signr = as->signr;
338 if (signr) {
334 sinfo.si_signo = as->signr; 339 sinfo.si_signo = as->signr;
335 sinfo.si_errno = as->status; 340 sinfo.si_errno = as->status;
336 sinfo.si_code = SI_ASYNCIO; 341 sinfo.si_code = SI_ASYNCIO;
337 sinfo.si_addr = as->userurb; 342 sinfo.si_addr = as->userurb;
338 kill_pid_info_as_uid(as->signr, &sinfo, as->pid, as->uid, 343 pid = as->pid;
339 as->euid, as->secid); 344 uid = as->uid;
345 euid = as->euid;
346 secid = as->secid;
340 } 347 }
341 snoop(&urb->dev->dev, "urb complete\n"); 348 snoop(&urb->dev->dev, "urb complete\n");
342 snoop_urb(urb, as->userurb); 349 snoop_urb(urb, as->userurb);
350 spin_unlock(&ps->lock);
351
352 if (signr)
353 kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid,
354 euid, secid);
355
343 wake_up(&ps->wait); 356 wake_up(&ps->wait);
344} 357}
345 358
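The async_completed() hunk above snapshots the signal parameters while ps->lock is held and issues kill_pid_info_as_uid() only after the unlock, presumably so signal delivery never runs under the driver spinlock. A minimal, self-contained sketch of that snapshot-then-notify pattern (hypothetical names, with kill_pid() standing in for the usbfs call):

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	struct notify_ctx {
		spinlock_t lock;
		int signr;		/* 0 means "no signal requested" */
		struct pid *pid;
	};

	static void complete_and_notify(struct notify_ctx *ctx)
	{
		struct pid *pid = NULL;
		int signr;

		spin_lock(&ctx->lock);
		signr = ctx->signr;	/* copy everything needed while locked */
		if (signr)
			pid = ctx->pid;
		spin_unlock(&ctx->lock);

		if (signr)		/* deliver with the lock dropped */
			kill_pid(pid, signr, 1);
	}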
@@ -582,7 +595,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
582 if (!ps) 595 if (!ps)
583 goto out; 596 goto out;
584 597
585 ret = -ENOENT; 598 ret = -ENODEV;
586 599
587 /* usbdev device-node */ 600 /* usbdev device-node */
588 if (imajor(inode) == USB_DEVICE_MAJOR) 601 if (imajor(inode) == USB_DEVICE_MAJOR)
@@ -982,7 +995,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
982 USBDEVFS_URB_ZERO_PACKET | 995 USBDEVFS_URB_ZERO_PACKET |
983 USBDEVFS_URB_NO_INTERRUPT)) 996 USBDEVFS_URB_NO_INTERRUPT))
984 return -EINVAL; 997 return -EINVAL;
985 if (!uurb->buffer) 998 if (uurb->buffer_length > 0 && !uurb->buffer)
986 return -EINVAL; 999 return -EINVAL;
987 if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && 1000 if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
988 (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) { 1001 (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) {
@@ -1038,11 +1051,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1038 is_in = 0; 1051 is_in = 0;
1039 uurb->endpoint &= ~USB_DIR_IN; 1052 uurb->endpoint &= ~USB_DIR_IN;
1040 } 1053 }
1041 if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
1042 uurb->buffer, uurb->buffer_length)) {
1043 kfree(dr);
1044 return -EFAULT;
1045 }
1046 snoop(&ps->dev->dev, "control urb: bRequest=%02x " 1054 snoop(&ps->dev->dev, "control urb: bRequest=%02x "
1047 "bRrequestType=%02x wValue=%04x " 1055 "bRrequestType=%02x wValue=%04x "
1048 "wIndex=%04x wLength=%04x\n", 1056 "wIndex=%04x wLength=%04x\n",
@@ -1062,9 +1070,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1062 uurb->number_of_packets = 0; 1070 uurb->number_of_packets = 0;
1063 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) 1071 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1064 return -EINVAL; 1072 return -EINVAL;
1065 if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
1066 uurb->buffer, uurb->buffer_length))
1067 return -EFAULT;
1068 snoop(&ps->dev->dev, "bulk urb\n"); 1073 snoop(&ps->dev->dev, "bulk urb\n");
1069 break; 1074 break;
1070 1075
@@ -1106,28 +1111,35 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1106 return -EINVAL; 1111 return -EINVAL;
1107 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) 1112 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1108 return -EINVAL; 1113 return -EINVAL;
1109 if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
1110 uurb->buffer, uurb->buffer_length))
1111 return -EFAULT;
1112 snoop(&ps->dev->dev, "interrupt urb\n"); 1114 snoop(&ps->dev->dev, "interrupt urb\n");
1113 break; 1115 break;
1114 1116
1115 default: 1117 default:
1116 return -EINVAL; 1118 return -EINVAL;
1117 } 1119 }
1118 as = alloc_async(uurb->number_of_packets); 1120 if (uurb->buffer_length > 0 &&
1119 if (!as) { 1121 !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
1122 uurb->buffer, uurb->buffer_length)) {
1120 kfree(isopkt); 1123 kfree(isopkt);
1121 kfree(dr); 1124 kfree(dr);
1122 return -ENOMEM; 1125 return -EFAULT;
1123 } 1126 }
1124 as->urb->transfer_buffer = kmalloc(uurb->buffer_length, GFP_KERNEL); 1127 as = alloc_async(uurb->number_of_packets);
1125 if (!as->urb->transfer_buffer) { 1128 if (!as) {
1126 kfree(isopkt); 1129 kfree(isopkt);
1127 kfree(dr); 1130 kfree(dr);
1128 free_async(as);
1129 return -ENOMEM; 1131 return -ENOMEM;
1130 } 1132 }
1133 if (uurb->buffer_length > 0) {
1134 as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
1135 GFP_KERNEL);
1136 if (!as->urb->transfer_buffer) {
1137 kfree(isopkt);
1138 kfree(dr);
1139 free_async(as);
1140 return -ENOMEM;
1141 }
1142 }
1131 as->urb->dev = ps->dev; 1143 as->urb->dev = ps->dev;
1132 as->urb->pipe = (uurb->type << 30) | 1144 as->urb->pipe = (uurb->type << 30) |
1133 __create_pipe(ps->dev, uurb->endpoint & 0xf) | 1145 __create_pipe(ps->dev, uurb->endpoint & 0xf) |
@@ -1169,7 +1181,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1169 kfree(isopkt); 1181 kfree(isopkt);
1170 as->ps = ps; 1182 as->ps = ps;
1171 as->userurb = arg; 1183 as->userurb = arg;
1172 if (uurb->endpoint & USB_DIR_IN) 1184 if (is_in && uurb->buffer_length > 0)
1173 as->userbuffer = uurb->buffer; 1185 as->userbuffer = uurb->buffer;
1174 else 1186 else
1175 as->userbuffer = NULL; 1187 as->userbuffer = NULL;
@@ -1179,9 +1191,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1179 as->uid = cred->uid; 1191 as->uid = cred->uid;
1180 as->euid = cred->euid; 1192 as->euid = cred->euid;
1181 security_task_getsecid(current, &as->secid); 1193 security_task_getsecid(current, &as->secid);
1182 if (!is_in) { 1194 if (!is_in && uurb->buffer_length > 0) {
1183 if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, 1195 if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
1184 as->urb->transfer_buffer_length)) { 1196 uurb->buffer_length)) {
1185 free_async(as); 1197 free_async(as);
1186 return -EFAULT; 1198 return -EFAULT;
1187 } 1199 }
@@ -1231,22 +1243,22 @@ static int processcompl(struct async *as, void __user * __user *arg)
1231 if (as->userbuffer) 1243 if (as->userbuffer)
1232 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1244 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
1233 urb->transfer_buffer_length)) 1245 urb->transfer_buffer_length))
1234 return -EFAULT; 1246 goto err_out;
1235 if (put_user(as->status, &userurb->status)) 1247 if (put_user(as->status, &userurb->status))
1236 return -EFAULT; 1248 goto err_out;
1237 if (put_user(urb->actual_length, &userurb->actual_length)) 1249 if (put_user(urb->actual_length, &userurb->actual_length))
1238 return -EFAULT; 1250 goto err_out;
1239 if (put_user(urb->error_count, &userurb->error_count)) 1251 if (put_user(urb->error_count, &userurb->error_count))
1240 return -EFAULT; 1252 goto err_out;
1241 1253
1242 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { 1254 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
1243 for (i = 0; i < urb->number_of_packets; i++) { 1255 for (i = 0; i < urb->number_of_packets; i++) {
1244 if (put_user(urb->iso_frame_desc[i].actual_length, 1256 if (put_user(urb->iso_frame_desc[i].actual_length,
1245 &userurb->iso_frame_desc[i].actual_length)) 1257 &userurb->iso_frame_desc[i].actual_length))
1246 return -EFAULT; 1258 goto err_out;
1247 if (put_user(urb->iso_frame_desc[i].status, 1259 if (put_user(urb->iso_frame_desc[i].status,
1248 &userurb->iso_frame_desc[i].status)) 1260 &userurb->iso_frame_desc[i].status))
1249 return -EFAULT; 1261 goto err_out;
1250 } 1262 }
1251 } 1263 }
1252 1264
@@ -1255,6 +1267,10 @@ static int processcompl(struct async *as, void __user * __user *arg)
1255 if (put_user(addr, (void __user * __user *)arg)) 1267 if (put_user(addr, (void __user * __user *)arg))
1256 return -EFAULT; 1268 return -EFAULT;
1257 return 0; 1269 return 0;
1270
1271err_out:
1272 free_async(as);
1273 return -EFAULT;
1258} 1274}
1259 1275
1260static struct async *reap_as(struct dev_state *ps) 1276static struct async *reap_as(struct dev_state *ps)
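The err_out label added to processcompl() above makes every faulting copy-out free the async that was just reaped, so its URB and transfer buffer are not leaked when userspace hands in a bad pointer. The same goto-style cleanup, reduced to a single check (names mirror devio.c, but the helper itself is illustrative):

	static int copy_status_sketch(struct async *as,
				      struct usbdevfs_urb __user *userurb)
	{
		if (put_user(as->status, &userurb->status))
			goto err_out;
		return 0;

	err_out:
		free_async(as);		/* already reaped, so it must be freed here */
		return -EFAULT;
	}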
@@ -1305,7 +1321,8 @@ static int get_urb32(struct usbdevfs_urb *kurb,
1305 struct usbdevfs_urb32 __user *uurb) 1321 struct usbdevfs_urb32 __user *uurb)
1306{ 1322{
1307 __u32 uptr; 1323 __u32 uptr;
1308 if (get_user(kurb->type, &uurb->type) || 1324 if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) ||
1325 __get_user(kurb->type, &uurb->type) ||
1309 __get_user(kurb->endpoint, &uurb->endpoint) || 1326 __get_user(kurb->endpoint, &uurb->endpoint) ||
1310 __get_user(kurb->status, &uurb->status) || 1327 __get_user(kurb->status, &uurb->status) ||
1311 __get_user(kurb->flags, &uurb->flags) || 1328 __get_user(kurb->flags, &uurb->flags) ||
@@ -1520,8 +1537,9 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
1520 u32 udata; 1537 u32 udata;
1521 1538
1522 uioc = compat_ptr((long)arg); 1539 uioc = compat_ptr((long)arg);
1523 if (get_user(ctrl.ifno, &uioc->ifno) || 1540 if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) ||
1524 get_user(ctrl.ioctl_code, &uioc->ioctl_code) || 1541 __get_user(ctrl.ifno, &uioc->ifno) ||
1542 __get_user(ctrl.ioctl_code, &uioc->ioctl_code) ||
1525 __get_user(udata, &uioc->data)) 1543 __get_user(udata, &uioc->data))
1526 return -EFAULT; 1544 return -EFAULT;
1527 ctrl.data = compat_ptr(udata); 1545 ctrl.data = compat_ptr(udata);
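Taken together, the devio.c buffer checks above allow a zero-length URB: buffer may be NULL when buffer_length is 0, the access_ok()/kmalloc() steps are skipped, and copy_from_user() and the IN-direction userbuffer handling only run for non-empty transfers. A minimal userspace sketch of such a submission (the device node path and endpoint number are placeholders; reaping with USBDEVFS_REAPURB is omitted):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/usbdevice_fs.h>

	int main(void)
	{
		struct usbdevfs_urb urb;
		int fd = open("/dev/bus/usb/001/002", O_RDWR);	/* placeholder device */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&urb, 0, sizeof(urb));
		urb.type = USBDEVFS_URB_TYPE_BULK;
		urb.endpoint = 0x01;		/* bulk OUT endpoint, board-specific */
		urb.buffer = NULL;		/* zero-length packet: no buffer needed */
		urb.buffer_length = 0;
		if (ioctl(fd, USBDEVFS_SUBMITURB, &urb) < 0)
			perror("USBDEVFS_SUBMITURB");
		close(fd);
		return 0;
	}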
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ce3f453f02ef..95ccfa0b9fc5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -648,7 +648,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
648 struct urb *urb; 648 struct urb *urb;
649 int length; 649 int length;
650 unsigned long flags; 650 unsigned long flags;
651 char buffer[4]; /* Any root hubs with > 31 ports? */ 651 char buffer[6]; /* Any root hubs with > 31 ports? */
652 652
653 if (unlikely(!hcd->rh_registered)) 653 if (unlikely(!hcd->rh_registered))
654 return; 654 return;
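The root-hub status buffer above grows from 4 to 6 bytes. hub_status_data() reports one change bit per port plus bit 0 for the hub itself, so the buffer needs roughly DIV_ROUND_UP(nports + 1, 8) bytes: 4 bytes only covers 31 ports (hence the old comment), while 6 covers up to 47. One way to express that sizing (the macro name is made up):

	#include <linux/kernel.h>

	/* one change bit per downstream port, plus bit 0 for the hub itself */
	#define RH_STATUS_BYTES(nports)	DIV_ROUND_UP((nports) + 1, 8)
	/* RH_STATUS_BYTES(31) == 4, RH_STATUS_BYTES(47) == 6 */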
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index d397ecfd5b17..ec5c67ea07b7 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -227,6 +227,10 @@ struct hc_driver {
227 /* has a port been handed over to a companion? */ 227 /* has a port been handed over to a companion? */
228 int (*port_handed_over)(struct usb_hcd *, int); 228 int (*port_handed_over)(struct usb_hcd *, int);
229 229
230 /* CLEAR_TT_BUFFER completion callback */
231 void (*clear_tt_buffer_complete)(struct usb_hcd *,
232 struct usb_host_endpoint *);
233
230 /* xHCI specific functions */ 234 /* xHCI specific functions */
231 /* Called by usb_alloc_dev to alloc HC device structures */ 235 /* Called by usb_alloc_dev to alloc HC device structures */
232 int (*alloc_dev)(struct usb_hcd *, struct usb_device *); 236 int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2af3b4f06054..71f86c60d83c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -450,10 +450,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
450 * talking to TTs must queue control transfers (not just bulk and iso), so 450 * talking to TTs must queue control transfers (not just bulk and iso), so
451 * both can talk to the same hub concurrently. 451 * both can talk to the same hub concurrently.
452 */ 452 */
453static void hub_tt_kevent (struct work_struct *work) 453static void hub_tt_work(struct work_struct *work)
454{ 454{
455 struct usb_hub *hub = 455 struct usb_hub *hub =
456 container_of(work, struct usb_hub, tt.kevent); 456 container_of(work, struct usb_hub, tt.clear_work);
457 unsigned long flags; 457 unsigned long flags;
458 int limit = 100; 458 int limit = 100;
459 459
@@ -462,6 +462,7 @@ static void hub_tt_kevent (struct work_struct *work)
462 struct list_head *next; 462 struct list_head *next;
463 struct usb_tt_clear *clear; 463 struct usb_tt_clear *clear;
464 struct usb_device *hdev = hub->hdev; 464 struct usb_device *hdev = hub->hdev;
465 const struct hc_driver *drv;
465 int status; 466 int status;
466 467
467 next = hub->tt.clear_list.next; 468 next = hub->tt.clear_list.next;
@@ -471,21 +472,25 @@ static void hub_tt_kevent (struct work_struct *work)
471 /* drop lock so HCD can concurrently report other TT errors */ 472 /* drop lock so HCD can concurrently report other TT errors */
472 spin_unlock_irqrestore (&hub->tt.lock, flags); 473 spin_unlock_irqrestore (&hub->tt.lock, flags);
473 status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt); 474 status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
474 spin_lock_irqsave (&hub->tt.lock, flags);
475
476 if (status) 475 if (status)
477 dev_err (&hdev->dev, 476 dev_err (&hdev->dev,
478 "clear tt %d (%04x) error %d\n", 477 "clear tt %d (%04x) error %d\n",
479 clear->tt, clear->devinfo, status); 478 clear->tt, clear->devinfo, status);
479
480 /* Tell the HCD, even if the operation failed */
481 drv = clear->hcd->driver;
482 if (drv->clear_tt_buffer_complete)
483 (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep);
484
480 kfree(clear); 485 kfree(clear);
486 spin_lock_irqsave(&hub->tt.lock, flags);
481 } 487 }
482 spin_unlock_irqrestore (&hub->tt.lock, flags); 488 spin_unlock_irqrestore (&hub->tt.lock, flags);
483} 489}
484 490
485/** 491/**
486 * usb_hub_tt_clear_buffer - clear control/bulk TT state in high speed hub 492 * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
487 * @udev: the device whose split transaction failed 493 * @urb: an URB associated with the failed or incomplete split transaction
488 * @pipe: identifies the endpoint of the failed transaction
489 * 494 *
490 * High speed HCDs use this to tell the hub driver that some split control or 495 * High speed HCDs use this to tell the hub driver that some split control or
491 * bulk transaction failed in a way that requires clearing internal state of 496 * bulk transaction failed in a way that requires clearing internal state of
@@ -495,8 +500,10 @@ static void hub_tt_kevent (struct work_struct *work)
495 * It may not be possible for that hub to handle additional full (or low) 500 * It may not be possible for that hub to handle additional full (or low)
496 * speed transactions until that state is fully cleared out. 501 * speed transactions until that state is fully cleared out.
497 */ 502 */
498void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe) 503int usb_hub_clear_tt_buffer(struct urb *urb)
499{ 504{
505 struct usb_device *udev = urb->dev;
506 int pipe = urb->pipe;
500 struct usb_tt *tt = udev->tt; 507 struct usb_tt *tt = udev->tt;
501 unsigned long flags; 508 unsigned long flags;
502 struct usb_tt_clear *clear; 509 struct usb_tt_clear *clear;
@@ -508,7 +515,7 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
508 if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) { 515 if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
509 dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n"); 516 dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
510 /* FIXME recover somehow ... RESET_TT? */ 517 /* FIXME recover somehow ... RESET_TT? */
511 return; 518 return -ENOMEM;
512 } 519 }
513 520
514 /* info that CLEAR_TT_BUFFER needs */ 521 /* info that CLEAR_TT_BUFFER needs */
@@ -520,14 +527,19 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
520 : (USB_ENDPOINT_XFER_BULK << 11); 527 : (USB_ENDPOINT_XFER_BULK << 11);
521 if (usb_pipein (pipe)) 528 if (usb_pipein (pipe))
522 clear->devinfo |= 1 << 15; 529 clear->devinfo |= 1 << 15;
523 530
531 /* info for completion callback */
532 clear->hcd = bus_to_hcd(udev->bus);
533 clear->ep = urb->ep;
534
524 /* tell keventd to clear state for this TT */ 535 /* tell keventd to clear state for this TT */
525 spin_lock_irqsave (&tt->lock, flags); 536 spin_lock_irqsave (&tt->lock, flags);
526 list_add_tail (&clear->clear_list, &tt->clear_list); 537 list_add_tail (&clear->clear_list, &tt->clear_list);
527 schedule_work (&tt->kevent); 538 schedule_work(&tt->clear_work);
528 spin_unlock_irqrestore (&tt->lock, flags); 539 spin_unlock_irqrestore (&tt->lock, flags);
540 return 0;
529} 541}
530EXPORT_SYMBOL_GPL(usb_hub_tt_clear_buffer); 542EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
531 543
532/* If do_delay is false, return the number of milliseconds the caller 544/* If do_delay is false, return the number of milliseconds the caller
533 * needs to delay. 545 * needs to delay.
@@ -818,7 +830,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
818 if (hub->has_indicators) 830 if (hub->has_indicators)
819 cancel_delayed_work_sync(&hub->leds); 831 cancel_delayed_work_sync(&hub->leds);
820 if (hub->tt.hub) 832 if (hub->tt.hub)
821 cancel_work_sync(&hub->tt.kevent); 833 cancel_work_sync(&hub->tt.clear_work);
822} 834}
823 835
824/* caller has locked the hub device */ 836/* caller has locked the hub device */
@@ -935,7 +947,7 @@ static int hub_configure(struct usb_hub *hub,
935 947
936 spin_lock_init (&hub->tt.lock); 948 spin_lock_init (&hub->tt.lock);
937 INIT_LIST_HEAD (&hub->tt.clear_list); 949 INIT_LIST_HEAD (&hub->tt.clear_list);
938 INIT_WORK (&hub->tt.kevent, hub_tt_kevent); 950 INIT_WORK(&hub->tt.clear_work, hub_tt_work);
939 switch (hdev->descriptor.bDeviceProtocol) { 951 switch (hdev->descriptor.bDeviceProtocol) {
940 case 0: 952 case 0:
941 break; 953 break;
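With the hub.c changes above, the CLEAR_TT_BUFFER helper is URB-based, returns a result, and ends with a notification: the work item now calls the HCD's clear_tt_buffer_complete() once the control request has been issued, whether or not it succeeded. A simplified sketch of how an HCD is expected to use the pair (hypothetical names; the real EHCI version appears later in this series):

	struct my_ep_priv {
		unsigned clearing_tt:1;		/* endpoint parked until the TT is clean */
	};

	static void my_hcd_split_error(struct usb_hcd *hcd, struct urb *urb,
				       struct my_ep_priv *priv)
	{
		/* ask the hub driver to issue CLEAR_TT_BUFFER and note it is pending */
		if (urb->dev->tt && usb_hub_clear_tt_buffer(urb) == 0)
			priv->clearing_tt = 1;
	}

	static void my_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
						    struct usb_host_endpoint *ep)
	{
		struct my_ep_priv *priv = ep->hcpriv;

		priv->clearing_tt = 0;		/* safe to relink/resubmit again */
	}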
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 889c0f32a40b..de8081f065ed 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -188,16 +188,18 @@ struct usb_tt {
188 /* for control/bulk error recovery (CLEAR_TT_BUFFER) */ 188 /* for control/bulk error recovery (CLEAR_TT_BUFFER) */
189 spinlock_t lock; 189 spinlock_t lock;
190 struct list_head clear_list; /* of usb_tt_clear */ 190 struct list_head clear_list; /* of usb_tt_clear */
191 struct work_struct kevent; 191 struct work_struct clear_work;
192}; 192};
193 193
194struct usb_tt_clear { 194struct usb_tt_clear {
195 struct list_head clear_list; 195 struct list_head clear_list;
196 unsigned tt; 196 unsigned tt;
197 u16 devinfo; 197 u16 devinfo;
198 struct usb_hcd *hcd;
199 struct usb_host_endpoint *ep;
198}; 200};
199 201
200extern void usb_hub_tt_clear_buffer(struct usb_device *dev, int pipe); 202extern int usb_hub_clear_tt_buffer(struct urb *urb);
201extern void usb_ep0_reinit(struct usb_device *); 203extern void usb_ep0_reinit(struct usb_device *);
202 204
203#endif /* __LINUX_HUB_H */ 205#endif /* __LINUX_HUB_H */
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 2bed83caacb1..9720e699f472 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -806,6 +806,48 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
806 return rc; 806 return rc;
807} 807}
808 808
809static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf)
810{
811 int err;
812
813 if (dev->have_langid)
814 return 0;
815
816 if (dev->string_langid < 0)
817 return -EPIPE;
818
819 err = usb_string_sub(dev, 0, 0, tbuf);
820
821 /* If the string was reported but is malformed, default to english
822 * (0x0409) */
823 if (err == -ENODATA || (err > 0 && err < 4)) {
824 dev->string_langid = 0x0409;
825 dev->have_langid = 1;
826 dev_err(&dev->dev,
827 "string descriptor 0 malformed (err = %d), "
828 "defaulting to 0x%04x\n",
829 err, dev->string_langid);
830 return 0;
831 }
832
833 /* In case of all other errors, we assume the device is not able to
834 * deal with strings at all. Set string_langid to -1 in order to
835 * prevent any string to be retrieved from the device */
836 if (err < 0) {
837 dev_err(&dev->dev, "string descriptor 0 read error: %d\n",
838 err);
839 dev->string_langid = -1;
840 return -EPIPE;
841 }
842
843 /* always use the first langid listed */
844 dev->string_langid = tbuf[2] | (tbuf[3] << 8);
845 dev->have_langid = 1;
846 dev_dbg(&dev->dev, "default language 0x%04x\n",
847 dev->string_langid);
848 return 0;
849}
850
809/** 851/**
810 * usb_string - returns UTF-8 version of a string descriptor 852 * usb_string - returns UTF-8 version of a string descriptor
811 * @dev: the device whose string descriptor is being retrieved 853 * @dev: the device whose string descriptor is being retrieved
@@ -837,24 +879,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
837 if (!tbuf) 879 if (!tbuf)
838 return -ENOMEM; 880 return -ENOMEM;
839 881
840 /* get langid for strings if it's not yet known */ 882 err = usb_get_langid(dev, tbuf);
841 if (!dev->have_langid) { 883 if (err < 0)
842 err = usb_string_sub(dev, 0, 0, tbuf); 884 goto errout;
843 if (err < 0) {
844 dev_err(&dev->dev,
845 "string descriptor 0 read error: %d\n",
846 err);
847 } else if (err < 4) {
848 dev_err(&dev->dev, "string descriptor 0 too short\n");
849 } else {
850 dev->string_langid = tbuf[2] | (tbuf[3] << 8);
851 /* always use the first langid listed */
852 dev_dbg(&dev->dev, "default language 0x%04x\n",
853 dev->string_langid);
854 }
855
856 dev->have_langid = 1;
857 }
858 885
859 err = usb_string_sub(dev, dev->string_langid, index, tbuf); 886 err = usb_string_sub(dev, dev->string_langid, index, tbuf);
860 if (err < 0) 887 if (err < 0)
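The usb_get_langid() factoring above also changes behaviour: a missing or malformed string descriptor 0 (-ENODATA, or fewer than 4 bytes returned) now falls back to US English (0x0409) instead of poisoning later lookups, while any other error marks the device as unable to do strings at all, so usb_string() returns -EPIPE from then on. An illustrative caller, for context (buffer size and message text are arbitrary):

	static void show_product_string(struct usb_device *udev)
	{
		char product[64];
		int len;

		len = usb_string(udev, udev->descriptor.iProduct, product,
				 sizeof(product));
		if (len < 0)	/* includes -EPIPE when the langid lookup has failed */
			dev_dbg(&udev->dev, "no product string (%d)\n", len);
		else
			dev_info(&udev->dev, "product: %s\n", product);
	}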
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 5d1ddf485d1e..7f8e83a954ac 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -286,6 +286,27 @@ config USB_S3C_HSOTG
286 default USB_GADGET 286 default USB_GADGET
287 select USB_GADGET_SELECTED 287 select USB_GADGET_SELECTED
288 288
289config USB_GADGET_IMX
290 boolean "Freescale IMX USB Peripheral Controller"
291 depends on ARCH_MX1
292 help
293 Freescale's IMX series include an integrated full speed
294 USB 1.1 device controller. The controller in the IMX series
295 is register-compatible.
296
297 It has Six fixed-function endpoints, as well as endpoint
298 zero (for control transfers).
299
300 Say "y" to link the driver statically, or "m" to build a
301 dynamically linked module called "imx_udc" and force all
302 gadget drivers to also be dynamically linked.
303
304config USB_IMX
305 tristate
306 depends on USB_GADGET_IMX
307 default USB_GADGET
308 select USB_GADGET_SELECTED
309
289config USB_GADGET_S3C2410 310config USB_GADGET_S3C2410
290 boolean "S3C2410 USB Device Controller" 311 boolean "S3C2410 USB Device Controller"
291 depends on ARCH_S3C2410 312 depends on ARCH_S3C2410
@@ -321,27 +342,6 @@ config USB_GADGET_MUSB_HDRC
321 This OTG-capable silicon IP is used in dual designs including 342 This OTG-capable silicon IP is used in dual designs including
322 the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin 343 the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin
323 344
324config USB_GADGET_IMX
325 boolean "Freescale IMX USB Peripheral Controller"
326 depends on ARCH_MX1
327 help
328 Freescale's IMX series include an integrated full speed
329 USB 1.1 device controller. The controller in the IMX series
330 is register-compatible.
331
332 It has Six fixed-function endpoints, as well as endpoint
333 zero (for control transfers).
334
335 Say "y" to link the driver statically, or "m" to build a
336 dynamically linked module called "imx_udc" and force all
337 gadget drivers to also be dynamically linked.
338
339config USB_IMX
340 tristate
341 depends on USB_GADGET_IMX
342 default USB_GADGET
343 select USB_GADGET_SELECTED
344
345config USB_GADGET_M66592 345config USB_GADGET_M66592
346 boolean "Renesas M66592 USB Peripheral Controller" 346 boolean "Renesas M66592 USB Peripheral Controller"
347 select USB_GADGET_DUALSPEED 347 select USB_GADGET_DUALSPEED
@@ -604,6 +604,7 @@ config USB_ZERO_HNPTEST
604config USB_AUDIO 604config USB_AUDIO
605 tristate "Audio Gadget (EXPERIMENTAL)" 605 tristate "Audio Gadget (EXPERIMENTAL)"
606 depends on SND 606 depends on SND
607 select SND_PCM
607 help 608 help
608 Gadget Audio is compatible with USB Audio Class specification 1.0. 609 Gadget Audio is compatible with USB Audio Class specification 1.0.
609 It will include at least one AudioControl interface, zero or more 610 It will include at least one AudioControl interface, zero or more
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 826f3adde5d8..77352ccc245e 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -48,7 +48,6 @@
48#include <linux/ioport.h> 48#include <linux/ioport.h>
49#include <linux/sched.h> 49#include <linux/sched.h>
50#include <linux/slab.h> 50#include <linux/slab.h>
51#include <linux/smp_lock.h>
52#include <linux/errno.h> 51#include <linux/errno.h>
53#include <linux/init.h> 52#include <linux/init.h>
54#include <linux/timer.h> 53#include <linux/timer.h>
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index 94de7e864614..9f80f4e970bd 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -42,9 +42,9 @@
42 * Instead: allocate your own, using normal USB-IF procedures. 42 * Instead: allocate your own, using normal USB-IF procedures.
43 */ 43 */
44 44
45/* Thanks to NetChip Technologies for donating this product ID. */ 45/* Thanks to Linux Foundation for donating this product ID. */
46#define AUDIO_VENDOR_NUM 0x0525 /* NetChip */ 46#define AUDIO_VENDOR_NUM 0x1d6b /* Linux Foundation */
47#define AUDIO_PRODUCT_NUM 0xa4a1 /* Linux-USB Audio Gadget */ 47#define AUDIO_PRODUCT_NUM 0x0101 /* Linux-USB Audio Gadget */
48 48
49/*-------------------------------------------------------------------------*/ 49/*-------------------------------------------------------------------------*/
50 50
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index d006dc652e02..bd102f5052ba 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -293,15 +293,16 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
293 /* CDC Subset */ 293 /* CDC Subset */
294 eth_config_driver.label = "CDC Subset/SAFE"; 294 eth_config_driver.label = "CDC Subset/SAFE";
295 295
296 device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM), 296 device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM);
297 device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM), 297 device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM);
298 device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; 298 if (!has_rndis())
299 device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
299 } 300 }
300 301
301 if (has_rndis()) { 302 if (has_rndis()) {
302 /* RNDIS plus ECM-or-Subset */ 303 /* RNDIS plus ECM-or-Subset */
303 device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM), 304 device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM);
304 device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM), 305 device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM);
305 device_desc.bNumConfigurations = 2; 306 device_desc.bNumConfigurations = 2;
306 } 307 }
307 308
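The ether.c hunk above is mostly a punctuation fix: the idVendor/idProduct assignments ended in commas, so each pair parsed as a single comma-expression statement. It happened to behave here, but the comma operator binds the following line into the same statement, which bites as soon as the code sits under a condition. A tiny standalone illustration (the values are arbitrary):

	#include <stdio.h>

	int main(void)
	{
		int vendor = 0, product = 0;

		if (0)
			vendor = 0x0525,	/* ',' instead of ';' ...            */
			product = 0xa4a1;	/* ... so BOTH lines are the if-body */

		/* prints vendor=0 product=0: the second assignment was skipped too */
		printf("vendor=%#x product=%#x\n", vendor, product);
		return 0;
	}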
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index 6829d5961359..a3913519fd58 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -34,7 +34,6 @@
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/smp_lock.h>
38#include <linux/errno.h> 37#include <linux/errno.h>
39#include <linux/init.h> 38#include <linux/init.h>
40#include <linux/timer.h> 39#include <linux/timer.h>
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 0ce4e2819847..ed21e263f832 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -139,7 +139,7 @@ static int is_vbus_present(void)
139{ 139{
140 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 140 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
141 141
142 if (mach->gpio_vbus) { 142 if (gpio_is_valid(mach->gpio_vbus)) {
143 int value = gpio_get_value(mach->gpio_vbus); 143 int value = gpio_get_value(mach->gpio_vbus);
144 144
145 if (mach->gpio_vbus_inverted) 145 if (mach->gpio_vbus_inverted)
@@ -158,7 +158,7 @@ static void pullup_off(void)
158 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 158 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
159 int off_level = mach->gpio_pullup_inverted; 159 int off_level = mach->gpio_pullup_inverted;
160 160
161 if (mach->gpio_pullup) 161 if (gpio_is_valid(mach->gpio_pullup))
162 gpio_set_value(mach->gpio_pullup, off_level); 162 gpio_set_value(mach->gpio_pullup, off_level);
163 else if (mach->udc_command) 163 else if (mach->udc_command)
164 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); 164 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
@@ -169,7 +169,7 @@ static void pullup_on(void)
169 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 169 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
170 int on_level = !mach->gpio_pullup_inverted; 170 int on_level = !mach->gpio_pullup_inverted;
171 171
172 if (mach->gpio_pullup) 172 if (gpio_is_valid(mach->gpio_pullup))
173 gpio_set_value(mach->gpio_pullup, on_level); 173 gpio_set_value(mach->gpio_pullup, on_level);
174 else if (mach->udc_command) 174 else if (mach->udc_command)
175 mach->udc_command(PXA2XX_UDC_CMD_CONNECT); 175 mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
@@ -1000,7 +1000,7 @@ static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
1000 udc = container_of(_gadget, struct pxa25x_udc, gadget); 1000 udc = container_of(_gadget, struct pxa25x_udc, gadget);
1001 1001
1002 /* not all boards support pullup control */ 1002 /* not all boards support pullup control */
1003 if (!udc->mach->gpio_pullup && !udc->mach->udc_command) 1003 if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
1004 return -EOPNOTSUPP; 1004 return -EOPNOTSUPP;
1005 1005
1006 udc->pullup = (is_active != 0); 1006 udc->pullup = (is_active != 0);
@@ -1802,11 +1802,13 @@ pxa25x_udc_irq(int irq, void *_dev)
1802 USIR0 |= tmp; 1802 USIR0 |= tmp;
1803 handled = 1; 1803 handled = 1;
1804 } 1804 }
1805#ifndef CONFIG_USB_PXA25X_SMALL
1805 if (usir1 & tmp) { 1806 if (usir1 & tmp) {
1806 handle_ep(&dev->ep[i+8]); 1807 handle_ep(&dev->ep[i+8]);
1807 USIR1 |= tmp; 1808 USIR1 |= tmp;
1808 handled = 1; 1809 handled = 1;
1809 } 1810 }
1811#endif
1810 } 1812 }
1811 } 1813 }
1812 1814
@@ -2160,7 +2162,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
2160 dev->dev = &pdev->dev; 2162 dev->dev = &pdev->dev;
2161 dev->mach = pdev->dev.platform_data; 2163 dev->mach = pdev->dev.platform_data;
2162 2164
2163 if (dev->mach->gpio_vbus) { 2165 if (gpio_is_valid(dev->mach->gpio_vbus)) {
2164 if ((retval = gpio_request(dev->mach->gpio_vbus, 2166 if ((retval = gpio_request(dev->mach->gpio_vbus,
2165 "pxa25x_udc GPIO VBUS"))) { 2167 "pxa25x_udc GPIO VBUS"))) {
2166 dev_dbg(&pdev->dev, 2168 dev_dbg(&pdev->dev,
@@ -2173,7 +2175,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
2173 } else 2175 } else
2174 vbus_irq = 0; 2176 vbus_irq = 0;
2175 2177
2176 if (dev->mach->gpio_pullup) { 2178 if (gpio_is_valid(dev->mach->gpio_pullup)) {
2177 if ((retval = gpio_request(dev->mach->gpio_pullup, 2179 if ((retval = gpio_request(dev->mach->gpio_pullup,
2178 "pca25x_udc GPIO PULLUP"))) { 2180 "pca25x_udc GPIO PULLUP"))) {
2179 dev_dbg(&pdev->dev, 2181 dev_dbg(&pdev->dev,
@@ -2256,10 +2258,10 @@ lubbock_fail0:
2256#endif 2258#endif
2257 free_irq(irq, dev); 2259 free_irq(irq, dev);
2258 err_irq1: 2260 err_irq1:
2259 if (dev->mach->gpio_pullup) 2261 if (gpio_is_valid(dev->mach->gpio_pullup))
2260 gpio_free(dev->mach->gpio_pullup); 2262 gpio_free(dev->mach->gpio_pullup);
2261 err_gpio_pullup: 2263 err_gpio_pullup:
2262 if (dev->mach->gpio_vbus) 2264 if (gpio_is_valid(dev->mach->gpio_vbus))
2263 gpio_free(dev->mach->gpio_vbus); 2265 gpio_free(dev->mach->gpio_vbus);
2264 err_gpio_vbus: 2266 err_gpio_vbus:
2265 clk_put(dev->clk); 2267 clk_put(dev->clk);
@@ -2294,11 +2296,11 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev)
2294 free_irq(LUBBOCK_USB_IRQ, dev); 2296 free_irq(LUBBOCK_USB_IRQ, dev);
2295 } 2297 }
2296#endif 2298#endif
2297 if (dev->mach->gpio_vbus) { 2299 if (gpio_is_valid(dev->mach->gpio_vbus)) {
2298 free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev); 2300 free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev);
2299 gpio_free(dev->mach->gpio_vbus); 2301 gpio_free(dev->mach->gpio_vbus);
2300 } 2302 }
2301 if (dev->mach->gpio_pullup) 2303 if (gpio_is_valid(dev->mach->gpio_pullup))
2302 gpio_free(dev->mach->gpio_pullup); 2304 gpio_free(dev->mach->gpio_pullup);
2303 2305
2304 clk_put(dev->clk); 2306 clk_put(dev->clk);
@@ -2329,7 +2331,7 @@ static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
2329 struct pxa25x_udc *udc = platform_get_drvdata(dev); 2331 struct pxa25x_udc *udc = platform_get_drvdata(dev);
2330 unsigned long flags; 2332 unsigned long flags;
2331 2333
2332 if (!udc->mach->gpio_pullup && !udc->mach->udc_command) 2334 if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
2333 WARNING("USB host won't detect disconnect!\n"); 2335 WARNING("USB host won't detect disconnect!\n");
2334 udc->suspended = 1; 2336 udc->suspended = 1;
2335 2337
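The pxa25x_udc.c conversion above replaces bare truthiness tests on GPIO numbers with gpio_is_valid(): a plain if (mach->gpio_vbus) treats GPIO 0 as "not wired" and a negative placeholder as present, whereas gpio_is_valid() accepts 0 and rejects negative or out-of-range numbers. A small illustrative helper:

	#include <linux/gpio.h>

	static int request_optional_gpio(int gpio, const char *label)
	{
		if (!gpio_is_valid(gpio))	/* negative or out of range: not wired */
			return 0;
		return gpio_request(gpio, label);	/* GPIO 0 is perfectly valid */
	}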
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 2b4660e08c4d..ca41b0b5afb3 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -442,6 +442,8 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
442 442
443 case OID_802_3_MAC_OPTIONS: 443 case OID_802_3_MAC_OPTIONS:
444 pr_debug("%s: OID_802_3_MAC_OPTIONS\n", __func__); 444 pr_debug("%s: OID_802_3_MAC_OPTIONS\n", __func__);
445 *outbuf = cpu_to_le32(0);
446 retval = 0;
445 break; 447 break;
446 448
447 /* ieee802.3 statistics OIDs (table 4-4) */ 449 /* ieee802.3 statistics OIDs (table 4-4) */
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 9a2b8920532d..a9b452fe6221 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -28,7 +28,6 @@
28#include <linux/ioport.h> 28#include <linux/ioport.h>
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/smp_lock.h>
32#include <linux/errno.h> 31#include <linux/errno.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/timer.h> 33#include <linux/timer.h>
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 1576a0520adf..1a920c70b5a1 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -181,26 +181,27 @@ config USB_OHCI_HCD_PPC_SOC
181 Enables support for the USB controller on the MPC52xx or 181 Enables support for the USB controller on the MPC52xx or
182 STB03xxx processor chip. If unsure, say Y. 182 STB03xxx processor chip. If unsure, say Y.
183 183
184config USB_OHCI_HCD_PPC_OF
185 bool "OHCI support for PPC USB controller on OF platform bus"
186 depends on USB_OHCI_HCD && PPC_OF
187 default y
188 ---help---
189 Enables support for the USB controller PowerPC present on the
190 OpenFirmware platform bus.
191
192config USB_OHCI_HCD_PPC_OF_BE 184config USB_OHCI_HCD_PPC_OF_BE
193 bool "Support big endian HC" 185 bool "OHCI support for OF platform bus (big endian)"
194 depends on USB_OHCI_HCD_PPC_OF 186 depends on USB_OHCI_HCD && PPC_OF
195 default y
196 select USB_OHCI_BIG_ENDIAN_DESC 187 select USB_OHCI_BIG_ENDIAN_DESC
197 select USB_OHCI_BIG_ENDIAN_MMIO 188 select USB_OHCI_BIG_ENDIAN_MMIO
189 ---help---
190 Enables support for big-endian USB controllers present on the
191 OpenFirmware platform bus.
198 192
199config USB_OHCI_HCD_PPC_OF_LE 193config USB_OHCI_HCD_PPC_OF_LE
200 bool "Support little endian HC" 194 bool "OHCI support for OF platform bus (little endian)"
201 depends on USB_OHCI_HCD_PPC_OF 195 depends on USB_OHCI_HCD && PPC_OF
202 default n
203 select USB_OHCI_LITTLE_ENDIAN 196 select USB_OHCI_LITTLE_ENDIAN
197 ---help---
198 Enables support for little-endian USB controllers present on the
199 OpenFirmware platform bus.
200
201config USB_OHCI_HCD_PPC_OF
202 bool
203 depends on USB_OHCI_HCD && PPC_OF
204 default USB_OHCI_HCD_PPC_OF_BE || USB_OHCI_HCD_PPC_OF_LE
204 205
205config USB_OHCI_HCD_PCI 206config USB_OHCI_HCD_PCI
206 bool "OHCI support for PCI-bus USB controllers" 207 bool "OHCI support for PCI-bus USB controllers"
@@ -337,10 +338,10 @@ config USB_R8A66597_HCD
337 338
338config SUPERH_ON_CHIP_R8A66597 339config SUPERH_ON_CHIP_R8A66597
339 boolean "Enable SuperH on-chip R8A66597 USB" 340 boolean "Enable SuperH on-chip R8A66597 USB"
340 depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723) 341 depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724)
341 help 342 help
342 This driver enables support for the on-chip R8A66597 in the 343 This driver enables support for the on-chip R8A66597 in the
343 SH7366 and SH7723 processors. 344 SH7366, SH7723 and SH7724 processors.
344 345
345config USB_WHCI_HCD 346config USB_WHCI_HCD
346 tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" 347 tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index c3a778bd359c..59d208d94d4e 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -113,6 +113,8 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
113 .bus_resume = ehci_bus_resume, 113 .bus_resume = ehci_bus_resume,
114 .relinquish_port = ehci_relinquish_port, 114 .relinquish_port = ehci_relinquish_port,
115 .port_handed_over = ehci_port_handed_over, 115 .port_handed_over = ehci_port_handed_over,
116
117 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
116}; 118};
117 119
118static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev) 120static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index bf86809c5120..991174937db3 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -325,6 +325,8 @@ static const struct hc_driver ehci_fsl_hc_driver = {
325 .bus_resume = ehci_bus_resume, 325 .bus_resume = ehci_bus_resume,
326 .relinquish_port = ehci_relinquish_port, 326 .relinquish_port = ehci_relinquish_port,
327 .port_handed_over = ehci_port_handed_over, 327 .port_handed_over = ehci_port_handed_over,
328
329 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
328}; 330};
329 331
330static int ehci_fsl_drv_probe(struct platform_device *pdev) 332static int ehci_fsl_drv_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 2b72473544d3..11c627ce6022 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -903,7 +903,8 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
903 /* already started */ 903 /* already started */
904 break; 904 break;
905 case QH_STATE_IDLE: 905 case QH_STATE_IDLE:
906 WARN_ON(1); 906 /* QH might be waiting for a Clear-TT-Buffer */
907 qh_completions(ehci, qh);
907 break; 908 break;
908 } 909 }
909 break; 910 break;
@@ -1003,6 +1004,8 @@ idle_timeout:
1003 schedule_timeout_uninterruptible(1); 1004 schedule_timeout_uninterruptible(1);
1004 goto rescan; 1005 goto rescan;
1005 case QH_STATE_IDLE: /* fully unlinked */ 1006 case QH_STATE_IDLE: /* fully unlinked */
1007 if (qh->clearing_tt)
1008 goto idle_timeout;
1006 if (list_empty (&qh->qtd_list)) { 1009 if (list_empty (&qh->qtd_list)) {
1007 qh_put (qh); 1010 qh_put (qh);
1008 break; 1011 break;
@@ -1030,12 +1033,14 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1030 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 1033 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
1031 struct ehci_qh *qh; 1034 struct ehci_qh *qh;
1032 int eptype = usb_endpoint_type(&ep->desc); 1035 int eptype = usb_endpoint_type(&ep->desc);
1036 int epnum = usb_endpoint_num(&ep->desc);
1037 int is_out = usb_endpoint_dir_out(&ep->desc);
1038 unsigned long flags;
1033 1039
1034 if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) 1040 if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
1035 return; 1041 return;
1036 1042
1037 rescan: 1043 spin_lock_irqsave(&ehci->lock, flags);
1038 spin_lock_irq(&ehci->lock);
1039 qh = ep->hcpriv; 1044 qh = ep->hcpriv;
1040 1045
1041 /* For Bulk and Interrupt endpoints we maintain the toggle state 1046 /* For Bulk and Interrupt endpoints we maintain the toggle state
@@ -1044,29 +1049,24 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1044 * the toggle bit in the QH. 1049 * the toggle bit in the QH.
1045 */ 1050 */
1046 if (qh) { 1051 if (qh) {
1052 usb_settoggle(qh->dev, epnum, is_out, 0);
1047 if (!list_empty(&qh->qtd_list)) { 1053 if (!list_empty(&qh->qtd_list)) {
1048 WARN_ONCE(1, "clear_halt for a busy endpoint\n"); 1054 WARN_ONCE(1, "clear_halt for a busy endpoint\n");
1049 } else if (qh->qh_state == QH_STATE_IDLE) { 1055 } else if (qh->qh_state == QH_STATE_LINKED) {
1050 qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); 1056
1051 } else { 1057 /* The toggle value in the QH can't be updated
1052 /* It's not safe to write into the overlay area 1058 * while the QH is active. Unlink it now;
1053 * while the QH is active. Unlink it first and 1059 * re-linking will call qh_refresh().
1054 * wait for the unlink to complete.
1055 */ 1060 */
1056 if (qh->qh_state == QH_STATE_LINKED) { 1061 if (eptype == USB_ENDPOINT_XFER_BULK) {
1057 if (eptype == USB_ENDPOINT_XFER_BULK) { 1062 unlink_async(ehci, qh);
1058 unlink_async(ehci, qh); 1063 } else {
1059 } else { 1064 intr_deschedule(ehci, qh);
1060 intr_deschedule(ehci, qh); 1065 (void) qh_schedule(ehci, qh);
1061 (void) qh_schedule(ehci, qh);
1062 }
1063 } 1066 }
1064 spin_unlock_irq(&ehci->lock);
1065 schedule_timeout_uninterruptible(1);
1066 goto rescan;
1067 } 1067 }
1068 } 1068 }
1069 spin_unlock_irq(&ehci->lock); 1069 spin_unlock_irqrestore(&ehci->lock, flags);
1070} 1070}
1071 1071
1072static int ehci_get_frame (struct usb_hcd *hcd) 1072static int ehci_get_frame (struct usb_hcd *hcd)
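In the ehci-hcd.c hunks above, ehci_endpoint_reset() no longer drops the lock and rescans while waiting for an unlink: it records DATA0 in the core's toggle bookkeeping and, for a linked QH, simply unlinks it, relying on qh_refresh() to rewrite the QH's own toggle when it is relinked. A minimal note on the bookkeeping side (the macros come from drivers/usb/core/hcd.h, so this only builds inside the USB core or an HCD; the function name is made up):

	static void note_endpoint_reset(struct usb_device *udev, int epnum, int is_out)
	{
		/* core-side state: the next transfer on this endpoint starts at DATA0 */
		usb_settoggle(udev, epnum, is_out, 0);
		/* the hardware QH copy catches up later, in qh_refresh()/qh_update() */
	}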
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index a44bb4a94954..89b7c70c6ed6 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -61,6 +61,8 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
61#endif 61#endif
62 .relinquish_port = ehci_relinquish_port, 62 .relinquish_port = ehci_relinquish_port,
63 .port_handed_over = ehci_port_handed_over, 63 .port_handed_over = ehci_port_handed_over,
64
65 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
64}; 66};
65 67
66static int ixp4xx_ehci_probe(struct platform_device *pdev) 68static int ixp4xx_ehci_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 770dd9aba62a..1d283e1b2b8d 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
105 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 105 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
106 int retval; 106 int retval;
107 107
108 ehci_reset(ehci);
108 retval = ehci_halt(ehci); 109 retval = ehci_halt(ehci);
109 if (retval) 110 if (retval)
110 return retval; 111 return retval;
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
118 119
119 hcd->has_tt = 1; 120 hcd->has_tt = 1;
120 121
121 ehci_reset(ehci);
122 ehci_port_power(ehci, 0); 122 ehci_port_power(ehci, 0);
123 123
124 return retval; 124 return retval;
@@ -165,6 +165,8 @@ static const struct hc_driver ehci_orion_hc_driver = {
165 .bus_resume = ehci_bus_resume, 165 .bus_resume = ehci_bus_resume,
166 .relinquish_port = ehci_relinquish_port, 166 .relinquish_port = ehci_relinquish_port,
167 .port_handed_over = ehci_port_handed_over, 167 .port_handed_over = ehci_port_handed_over,
168
169 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
168}; 170};
169 171
170static void __init 172static void __init
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index f3683e1da161..c2f1b7df918c 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -404,6 +404,8 @@ static const struct hc_driver ehci_pci_hc_driver = {
404 .bus_resume = ehci_bus_resume, 404 .bus_resume = ehci_bus_resume,
405 .relinquish_port = ehci_relinquish_port, 405 .relinquish_port = ehci_relinquish_port,
406 .port_handed_over = ehci_port_handed_over, 406 .port_handed_over = ehci_port_handed_over,
407
408 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
407}; 409};
408 410
409/*-------------------------------------------------------------------------*/ 411/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index fbd272288fc2..36f96da129f5 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -79,6 +79,8 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
79#endif 79#endif
80 .relinquish_port = ehci_relinquish_port, 80 .relinquish_port = ehci_relinquish_port,
81 .port_handed_over = ehci_port_handed_over, 81 .port_handed_over = ehci_port_handed_over,
82
83 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
82}; 84};
83 85
84 86
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 93f7035d00a1..1dee33b9139e 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -75,6 +75,8 @@ static const struct hc_driver ps3_ehci_hc_driver = {
75#endif 75#endif
76 .relinquish_port = ehci_relinquish_port, 76 .relinquish_port = ehci_relinquish_port,
77 .port_handed_over = ehci_port_handed_over, 77 .port_handed_over = ehci_port_handed_over,
78
79 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
78}; 80};
79 81
80static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev) 82static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 3192f683f807..7673554fa64d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -93,6 +93,22 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
93 qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); 93 qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
94 qh->hw_alt_next = EHCI_LIST_END(ehci); 94 qh->hw_alt_next = EHCI_LIST_END(ehci);
95 95
96 /* Except for control endpoints, we make hardware maintain data
97 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
98 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
99 * ever clear it.
100 */
101 if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
102 unsigned is_out, epnum;
103
104 is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
105 epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
106 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
107 qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
108 usb_settoggle (qh->dev, epnum, is_out, 1);
109 }
110 }
111
96 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ 112 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
97 wmb (); 113 wmb ();
98 qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); 114 qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
@@ -123,6 +139,55 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
123 139
124/*-------------------------------------------------------------------------*/ 140/*-------------------------------------------------------------------------*/
125 141
142static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
143
144static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
145 struct usb_host_endpoint *ep)
146{
147 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
148 struct ehci_qh *qh = ep->hcpriv;
149 unsigned long flags;
150
151 spin_lock_irqsave(&ehci->lock, flags);
152 qh->clearing_tt = 0;
153 if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
154 && HC_IS_RUNNING(hcd->state))
155 qh_link_async(ehci, qh);
156 spin_unlock_irqrestore(&ehci->lock, flags);
157}
158
159static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
160 struct urb *urb, u32 token)
161{
162
163 /* If an async split transaction gets an error or is unlinked,
164 * the TT buffer may be left in an indeterminate state. We
165 * have to clear the TT buffer.
166 *
167 * Note: this routine is never called for Isochronous transfers.
168 */
169 if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
170#ifdef DEBUG
171 struct usb_device *tt = urb->dev->tt->hub;
172 dev_dbg(&tt->dev,
173 "clear tt buffer port %d, a%d ep%d t%08x\n",
174 urb->dev->ttport, urb->dev->devnum,
175 usb_pipeendpoint(urb->pipe), token);
176#endif /* DEBUG */
177 if (!ehci_is_TDI(ehci)
178 || urb->dev->tt->hub !=
179 ehci_to_hcd(ehci)->self.root_hub) {
180 if (usb_hub_clear_tt_buffer(urb) == 0)
181 qh->clearing_tt = 1;
182 } else {
183
184 /* REVISIT ARC-derived cores don't clear the root
185 * hub TT buffer in this way...
186 */
187 }
188 }
189}
190
126static int qtd_copy_status ( 191static int qtd_copy_status (
127 struct ehci_hcd *ehci, 192 struct ehci_hcd *ehci,
128 struct urb *urb, 193 struct urb *urb,
@@ -149,6 +214,14 @@ static int qtd_copy_status (
149 if (token & QTD_STS_BABBLE) { 214 if (token & QTD_STS_BABBLE) {
150 /* FIXME "must" disable babbling device's port too */ 215 /* FIXME "must" disable babbling device's port too */
151 status = -EOVERFLOW; 216 status = -EOVERFLOW;
217 /* CERR nonzero + halt --> stall */
218 } else if (QTD_CERR(token)) {
219 status = -EPIPE;
220
221 /* In theory, more than one of the following bits can be set
222 * since they are sticky and the transaction is retried.
223 * Which to test first is rather arbitrary.
224 */
152 } else if (token & QTD_STS_MMF) { 225 } else if (token & QTD_STS_MMF) {
153 /* fs/ls interrupt xfer missed the complete-split */ 226 /* fs/ls interrupt xfer missed the complete-split */
154 status = -EPROTO; 227 status = -EPROTO;
@@ -157,21 +230,15 @@ static int qtd_copy_status (
157 ? -ENOSR /* hc couldn't read data */ 230 ? -ENOSR /* hc couldn't read data */
158 : -ECOMM; /* hc couldn't write data */ 231 : -ECOMM; /* hc couldn't write data */
159 } else if (token & QTD_STS_XACT) { 232 } else if (token & QTD_STS_XACT) {
160 /* timeout, bad crc, wrong PID, etc; retried */ 233 /* timeout, bad CRC, wrong PID, etc */
161 if (QTD_CERR (token)) 234 ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
162 status = -EPIPE; 235 urb->dev->devpath,
163 else { 236 usb_pipeendpoint(urb->pipe),
164 ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n", 237 usb_pipein(urb->pipe) ? "in" : "out");
165 urb->dev->devpath,
166 usb_pipeendpoint (urb->pipe),
167 usb_pipein (urb->pipe) ? "in" : "out");
168 status = -EPROTO;
169 }
170 /* CERR nonzero + no errors + halt --> stall */
171 } else if (QTD_CERR (token))
172 status = -EPIPE;
173 else /* unknown */
174 status = -EPROTO; 238 status = -EPROTO;
239 } else { /* unknown */
240 status = -EPROTO;
241 }
175 242
176 ehci_vdbg (ehci, 243 ehci_vdbg (ehci,
177 "dev%d ep%d%s qtd token %08x --> status %d\n", 244 "dev%d ep%d%s qtd token %08x --> status %d\n",
@@ -179,28 +246,6 @@ static int qtd_copy_status (
179 usb_pipeendpoint (urb->pipe), 246 usb_pipeendpoint (urb->pipe),
180 usb_pipein (urb->pipe) ? "in" : "out", 247 usb_pipein (urb->pipe) ? "in" : "out",
181 token, status); 248 token, status);
182
183 /* if async CSPLIT failed, try cleaning out the TT buffer */
184 if (status != -EPIPE
185 && urb->dev->tt
186 && !usb_pipeint(urb->pipe)
187 && ((token & QTD_STS_MMF) != 0
188 || QTD_CERR(token) == 0)
189 && (!ehci_is_TDI(ehci)
190 || urb->dev->tt->hub !=
191 ehci_to_hcd(ehci)->self.root_hub)) {
192#ifdef DEBUG
193 struct usb_device *tt = urb->dev->tt->hub;
194 dev_dbg (&tt->dev,
195 "clear tt buffer port %d, a%d ep%d t%08x\n",
196 urb->dev->ttport, urb->dev->devnum,
197 usb_pipeendpoint (urb->pipe), token);
198#endif /* DEBUG */
199 /* REVISIT ARC-derived cores don't clear the root
200 * hub TT buffer in this way...
201 */
202 usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
203 }
204 } 249 }
205 250
206 return status; 251 return status;
@@ -330,12 +375,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
330 */ 375 */
331 if ((token & QTD_STS_XACT) && 376 if ((token & QTD_STS_XACT) &&
332 QTD_CERR(token) == 0 && 377 QTD_CERR(token) == 0 &&
333 --qh->xacterrs > 0 && 378 ++qh->xacterrs < QH_XACTERR_MAX &&
334 !urb->unlinked) { 379 !urb->unlinked) {
335 ehci_dbg(ehci, 380 ehci_dbg(ehci,
336 "detected XactErr len %zu/%zu retry %d\n", 381 "detected XactErr len %zu/%zu retry %d\n",
337 qtd->length - QTD_LENGTH(token), qtd->length, 382 qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
338 QH_XACTERR_MAX - qh->xacterrs);
339 383
340 /* reset the token in the qtd and the 384 /* reset the token in the qtd and the
341 * qh overlay (which still contains 385 * qh overlay (which still contains
@@ -391,9 +435,16 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
391 /* qh unlinked; token in overlay may be most current */ 435 /* qh unlinked; token in overlay may be most current */
392 if (state == QH_STATE_IDLE 436 if (state == QH_STATE_IDLE
393 && cpu_to_hc32(ehci, qtd->qtd_dma) 437 && cpu_to_hc32(ehci, qtd->qtd_dma)
394 == qh->hw_current) 438 == qh->hw_current) {
395 token = hc32_to_cpu(ehci, qh->hw_token); 439 token = hc32_to_cpu(ehci, qh->hw_token);
396 440
441 /* An unlink may leave an incomplete
442 * async transaction in the TT buffer.
443 * We have to clear it.
444 */
445 ehci_clear_tt_buffer(ehci, qh, urb, token);
446 }
447
397 /* force halt for unlinked or blocked qh, so we'll 448 /* force halt for unlinked or blocked qh, so we'll
398 * patch the qh later and so that completions can't 449 * patch the qh later and so that completions can't
399 * activate it while we "know" it's stopped. 450 * activate it while we "know" it's stopped.
@@ -419,6 +470,13 @@ halt:
419 && (qtd->hw_alt_next 470 && (qtd->hw_alt_next
420 & EHCI_LIST_END(ehci))) 471 & EHCI_LIST_END(ehci)))
421 last_status = -EINPROGRESS; 472 last_status = -EINPROGRESS;
473
474 /* As part of low/full-speed endpoint-halt processing
475 * we must clear the TT buffer (11.17.5).
476 */
477 if (unlikely(last_status != -EINPROGRESS &&
478 last_status != -EREMOTEIO))
479 ehci_clear_tt_buffer(ehci, qh, urb, token);
422 } 480 }
423 481
424 /* if we're removing something not at the queue head, 482 /* if we're removing something not at the queue head,
@@ -435,7 +493,7 @@ halt:
435 last = qtd; 493 last = qtd;
436 494
437 /* reinit the xacterr counter for the next qtd */ 495 /* reinit the xacterr counter for the next qtd */
438 qh->xacterrs = QH_XACTERR_MAX; 496 qh->xacterrs = 0;
439 } 497 }
440 498
441 /* last urb's completion might still need calling */ 499 /* last urb's completion might still need calling */
@@ -834,6 +892,7 @@ done:
834 qh->qh_state = QH_STATE_IDLE; 892 qh->qh_state = QH_STATE_IDLE;
835 qh->hw_info1 = cpu_to_hc32(ehci, info1); 893 qh->hw_info1 = cpu_to_hc32(ehci, info1);
836 qh->hw_info2 = cpu_to_hc32(ehci, info2); 894 qh->hw_info2 = cpu_to_hc32(ehci, info2);
895 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
837 qh_refresh (ehci, qh); 896 qh_refresh (ehci, qh);
838 return qh; 897 return qh;
839} 898}
@@ -847,6 +906,10 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
847 __hc32 dma = QH_NEXT(ehci, qh->qh_dma); 906 __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
848 struct ehci_qh *head; 907 struct ehci_qh *head;
849 908
909 /* Don't link a QH if there's a Clear-TT-Buffer pending */
910 if (unlikely(qh->clearing_tt))
911 return;
912
850 /* (re)start the async schedule? */ 913 /* (re)start the async schedule? */
851 head = ehci->async; 914 head = ehci->async;
852 timer_action_done (ehci, TIMER_ASYNC_OFF); 915 timer_action_done (ehci, TIMER_ASYNC_OFF);
@@ -864,7 +927,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
864 } 927 }
865 } 928 }
866 929
867 /* clear halt and maybe recover from silicon quirk */ 930 /* clear halt and/or toggle; and maybe recover from silicon quirk */
868 if (qh->qh_state == QH_STATE_IDLE) 931 if (qh->qh_state == QH_STATE_IDLE)
869 qh_refresh (ehci, qh); 932 qh_refresh (ehci, qh);
870 933
@@ -876,7 +939,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
876 head->qh_next.qh = qh; 939 head->qh_next.qh = qh;
877 head->hw_next = dma; 940 head->hw_next = dma;
878 941
879 qh->xacterrs = QH_XACTERR_MAX; 942 qh_get(qh);
943 qh->xacterrs = 0;
880 qh->qh_state = QH_STATE_LINKED; 944 qh->qh_state = QH_STATE_LINKED;
881 /* qtd completions reported later by interrupt */ 945 /* qtd completions reported later by interrupt */
882} 946}
@@ -1016,7 +1080,7 @@ submit_async (
1016 * the HC and TT handle it when the TT has a buffer ready. 1080 * the HC and TT handle it when the TT has a buffer ready.
1017 */ 1081 */
1018 if (likely (qh->qh_state == QH_STATE_IDLE)) 1082 if (likely (qh->qh_state == QH_STATE_IDLE))
1019 qh_link_async (ehci, qh_get (qh)); 1083 qh_link_async(ehci, qh);
1020 done: 1084 done:
1021 spin_unlock_irqrestore (&ehci->lock, flags); 1085 spin_unlock_irqrestore (&ehci->lock, flags);
1022 if (unlikely (qh == NULL)) 1086 if (unlikely (qh == NULL))
@@ -1051,8 +1115,6 @@ static void end_unlink_async (struct ehci_hcd *ehci)
1051 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) 1115 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
1052 qh_link_async (ehci, qh); 1116 qh_link_async (ehci, qh);
1053 else { 1117 else {
1054 qh_put (qh); // refcount from async list
1055
1056 /* it's not free to turn the async schedule on/off; leave it 1118 /* it's not free to turn the async schedule on/off; leave it
1057 * active but idle for a while once it empties. 1119 * active but idle for a while once it empties.
1058 */ 1120 */
@@ -1060,6 +1122,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
1060 && ehci->async->qh_next.qh == NULL) 1122 && ehci->async->qh_next.qh == NULL)
1061 timer_action (ehci, TIMER_ASYNC_OFF); 1123 timer_action (ehci, TIMER_ASYNC_OFF);
1062 } 1124 }
1125 qh_put(qh); /* refcount from async list */
1063 1126
1064 if (next) { 1127 if (next) {
1065 ehci->reclaim = NULL; 1128 ehci->reclaim = NULL;
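The ehci-q.c hunks above invert the XactErr bookkeeping: qh->xacterrs used to be pre-loaded with QH_XACTERR_MAX and counted down, and is now reset to 0 whenever the QH is (re)linked and counted up, retrying only while it stays under the limit. A minimal standalone sketch of the new convention follows; QH_XACTERR_MAX is assumed to be 32 as in ehci.h of this era, and the structure is reduced to the one relevant field.

#define QH_XACTERR_MAX	32	/* assumed limit; the real define lives in ehci.h */

struct qh_sketch {
	unsigned xacterrs;	/* reset to 0 when the QH is linked */
};

/* Count transaction errors up; retry only while the budget lasts. */
static int xacterr_should_retry(struct qh_sketch *qh)
{
	return ++qh->xacterrs < QH_XACTERR_MAX;
}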
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 9d1babc7ff65..edd61ee90323 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -542,6 +542,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
542 } 542 }
543 } 543 }
544 qh->qh_state = QH_STATE_LINKED; 544 qh->qh_state = QH_STATE_LINKED;
545 qh->xacterrs = 0;
545 qh_get (qh); 546 qh_get (qh);
546 547
547 /* update per-qh bandwidth for usbfs */ 548 /* update per-qh bandwidth for usbfs */
@@ -1619,11 +1620,14 @@ itd_complete (
1619 desc->status = -EPROTO; 1620 desc->status = -EPROTO;
1620 1621
1621 /* HC need not update length with this error */ 1622 /* HC need not update length with this error */
1622 if (!(t & EHCI_ISOC_BABBLE)) 1623 if (!(t & EHCI_ISOC_BABBLE)) {
1623 desc->actual_length = EHCI_ITD_LENGTH (t); 1624 desc->actual_length = EHCI_ITD_LENGTH(t);
1625 urb->actual_length += desc->actual_length;
1626 }
1624 } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) { 1627 } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
1625 desc->status = 0; 1628 desc->status = 0;
1626 desc->actual_length = EHCI_ITD_LENGTH (t); 1629 desc->actual_length = EHCI_ITD_LENGTH(t);
1630 urb->actual_length += desc->actual_length;
1627 } else { 1631 } else {
1628 /* URB was too late */ 1632 /* URB was too late */
1629 desc->status = -EXDEV; 1633 desc->status = -EXDEV;
@@ -2014,7 +2018,8 @@ sitd_complete (
2014 desc->status = -EPROTO; 2018 desc->status = -EPROTO;
2015 } else { 2019 } else {
2016 desc->status = 0; 2020 desc->status = 0;
2017 desc->actual_length = desc->length - SITD_LENGTH (t); 2021 desc->actual_length = desc->length - SITD_LENGTH(t);
2022 urb->actual_length += desc->actual_length;
2018 } 2023 }
2019 stream->depth -= stream->interval << 3; 2024 stream->depth -= stream->interval << 3;
2020 2025
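Both isochronous completion paths above (itd_complete() and sitd_complete()) now add each frame descriptor's actual_length into urb->actual_length instead of leaving the URB total untouched. A small standalone sketch of that accumulation, using a simplified descriptor type rather than the driver's own:

struct iso_frame_sketch {
	int status;
	unsigned actual_length;	/* what the controller reports per frame */
};

/* Mirrors "urb->actual_length += desc->actual_length" across all frames. */
static unsigned urb_total_length(const struct iso_frame_sketch *f, int nframes)
{
	unsigned total = 0;
	int i;

	for (i = 0; i < nframes; i++)
		total += f[i].actual_length;
	return total;
}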
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 90ad3395bb21..2bfff30f4704 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -354,7 +354,9 @@ struct ehci_qh {
354 unsigned short period; /* polling interval */ 354 unsigned short period; /* polling interval */
355 unsigned short start; /* where polling starts */ 355 unsigned short start; /* where polling starts */
356#define NO_FRAME ((unsigned short)~0) /* pick new start */ 356#define NO_FRAME ((unsigned short)~0) /* pick new start */
357
357 struct usb_device *dev; /* access to TT */ 358 struct usb_device *dev; /* access to TT */
359 unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
358} __attribute__ ((aligned (32))); 360} __attribute__ ((aligned (32)));
359 361
360/*-------------------------------------------------------------------------*/ 362/*-------------------------------------------------------------------------*/
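The new clearing_tt:1 bit added to struct ehci_qh is the flag that qh_link_async() tests before putting a QH back on the async schedule: while a Clear-TT-Buffer request is outstanding for the hub, the QH must stay unlinked. A hedged sketch of that gate, with an illustrative structure and helper name that are not taken from the driver:

struct qh_flags_sketch {
	unsigned clearing_tt:1;	/* Clear-TT-Buf in progress, as in the diff */
};

/* Refuse to relink while the hub's TT buffer is still being cleared. */
static int may_link_async(const struct qh_flags_sketch *qh)
{
	return !qh->clearing_tt;
}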
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index bb63b68ddb77..62a226b61670 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -576,9 +576,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
576 out_be16(&usb->fhci->regs->usb_event, 576 out_be16(&usb->fhci->regs->usb_event,
577 usb->saved_msk); 577 usb->saved_msk);
578 } else if (usb->port_status == FHCI_PORT_DISABLED) { 578 } else if (usb->port_status == FHCI_PORT_DISABLED) {
579 if (fhci_ioports_check_bus_state(fhci) == 1 && 579 if (fhci_ioports_check_bus_state(fhci) == 1)
580 usb->port_status != FHCI_PORT_LOW &&
581 usb->port_status != FHCI_PORT_FULL)
582 fhci_device_connected_interrupt(fhci); 580 fhci_device_connected_interrupt(fhci);
583 } 581 }
584 usb_er &= ~USB_E_RESET_MASK; 582 usb_er &= ~USB_E_RESET_MASK;
@@ -605,9 +603,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
605 } 603 }
606 604
607 if (usb_er & USB_E_IDLE_MASK) { 605 if (usb_er & USB_E_IDLE_MASK) {
608 if (usb->port_status == FHCI_PORT_DISABLED && 606 if (usb->port_status == FHCI_PORT_DISABLED) {
609 usb->port_status != FHCI_PORT_LOW &&
610 usb->port_status != FHCI_PORT_FULL) {
611 usb_er &= ~USB_E_RESET_MASK; 607 usb_er &= ~USB_E_RESET_MASK;
612 fhci_device_connected_interrupt(fhci); 608 fhci_device_connected_interrupt(fhci);
613 } else if (usb->port_status == 609 } else if (usb->port_status ==
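The fhci-sched.c hunks drop comparisons that could never change the outcome: once usb->port_status equals FHCI_PORT_DISABLED, testing that the same field also differs from FHCI_PORT_LOW and FHCI_PORT_FULL is redundant. A tiny standalone sketch of the simplified predicate, with illustrative enum names standing in for the driver's constants:

enum port_state_sketch { PORT_DISABLED, PORT_LOW, PORT_FULL };

/* Equivalent to the old compound test: the extra != checks added nothing. */
static int want_connected_irq(enum port_state_sketch s)
{
	return s == PORT_DISABLED;
}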
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 3fa3a1702796..d4feebfc63bd 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -361,7 +361,7 @@ static int __devexit isp1760_plat_remove(struct platform_device *pdev)
361 361
362static struct platform_driver isp1760_plat_driver = { 362static struct platform_driver isp1760_plat_driver = {
363 .probe = isp1760_plat_probe, 363 .probe = isp1760_plat_probe,
364 .remove = isp1760_plat_remove, 364 .remove = __devexit_p(isp1760_plat_remove),
365 .driver = { 365 .driver = {
366 .name = "isp1760", 366 .name = "isp1760",
367 }, 367 },
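The isp1760 change wraps the platform driver's remove callback in __devexit_p(). When the callback is annotated __devexit and the kernel is built without support for device removal, the function body is discarded, and the macro turns the pointer into NULL so the driver structure never references discarded code. A hedged sketch of the idea, using an illustrative macro name rather than the kernel's definition:

/* Sketch: keep the pointer only when removal code stays in the image. */
#ifdef KEEP_REMOVAL_CODE			/* stands in for MODULE/CONFIG_HOTPLUG */
#define my_devexit_p(fn)	(fn)
#else
#define my_devexit_p(fn)	((void *)0)	/* never point at discarded code */
#endif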
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index f3aaba35e912..83cbecd2a1ed 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
282static void ohci_omap_stop(struct usb_hcd *hcd) 282static void ohci_omap_stop(struct usb_hcd *hcd)
283{ 283{
284 dev_dbg(hcd->self.controller, "stopping USB Controller\n"); 284 dev_dbg(hcd->self.controller, "stopping USB Controller\n");
285 ohci_stop(hcd);
285 omap_ohci_clock_power(0); 286 omap_ohci_clock_power(0);
286} 287}
287 288
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 56976cc0352a..e18f74946e68 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -26,7 +26,6 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/smp_lock.h>
30#include <linux/errno.h> 29#include <linux/errno.h>
31#include <linux/init.h> 30#include <linux/init.h>
32#include <linux/timer.h> 31#include <linux/timer.h>
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2501c571f855..705e34324156 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
173{ 173{
174 void *addr; 174 void *addr;
175 u32 temp; 175 u32 temp;
176 u64 temp_64;
176 177
177 addr = &ir_set->irq_pending; 178 addr = &ir_set->irq_pending;
178 temp = xhci_readl(xhci, addr); 179 temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
200 xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n", 201 xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
201 addr, (unsigned int)temp); 202 addr, (unsigned int)temp);
202 203
203 addr = &ir_set->erst_base[0]; 204 addr = &ir_set->erst_base;
204 temp = xhci_readl(xhci, addr); 205 temp_64 = xhci_read_64(xhci, addr);
205 xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n", 206 xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
206 addr, (unsigned int) temp); 207 addr, temp_64);
207
208 addr = &ir_set->erst_base[1];
209 temp = xhci_readl(xhci, addr);
210 xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
211 addr, (unsigned int) temp);
212 208
213 addr = &ir_set->erst_dequeue[0]; 209 addr = &ir_set->erst_dequeue;
214 temp = xhci_readl(xhci, addr); 210 temp_64 = xhci_read_64(xhci, addr);
215 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n", 211 xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
216 addr, (unsigned int) temp); 212 addr, temp_64);
217
218 addr = &ir_set->erst_dequeue[1];
219 temp = xhci_readl(xhci, addr);
220 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
221 addr, (unsigned int) temp);
222} 213}
223 214
224void xhci_print_run_regs(struct xhci_hcd *xhci) 215void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
268 xhci_dbg(xhci, "Link TRB:\n"); 259 xhci_dbg(xhci, "Link TRB:\n");
269 xhci_print_trb_offsets(xhci, trb); 260 xhci_print_trb_offsets(xhci, trb);
270 261
271 address = trb->link.segment_ptr[0] + 262 address = trb->link.segment_ptr;
272 (((u64) trb->link.segment_ptr[1]) << 32);
273 xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); 263 xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
274 264
275 xhci_dbg(xhci, "Interrupter target = 0x%x\n", 265 xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
282 (unsigned int) (trb->link.control & TRB_NO_SNOOP)); 272 (unsigned int) (trb->link.control & TRB_NO_SNOOP));
283 break; 273 break;
284 case TRB_TYPE(TRB_TRANSFER): 274 case TRB_TYPE(TRB_TRANSFER):
285 address = trb->trans_event.buffer[0] + 275 address = trb->trans_event.buffer;
286 (((u64) trb->trans_event.buffer[1]) << 32);
287 /* 276 /*
288 * FIXME: look at flags to figure out if it's an address or if 277 * FIXME: look at flags to figure out if it's an address or if
289 * the data is directly in the buffer field. 278 * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
291 xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); 280 xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
292 break; 281 break;
293 case TRB_TYPE(TRB_COMPLETION): 282 case TRB_TYPE(TRB_COMPLETION):
294 address = trb->event_cmd.cmd_trb[0] + 283 address = trb->event_cmd.cmd_trb;
295 (((u64) trb->event_cmd.cmd_trb[1]) << 32);
296 xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); 284 xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
297 xhci_dbg(xhci, "Completion status = %u\n", 285 xhci_dbg(xhci, "Completion status = %u\n",
298 (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); 286 (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
328 for (i = 0; i < TRBS_PER_SEGMENT; ++i) { 316 for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
329 trb = &seg->trbs[i]; 317 trb = &seg->trbs[i];
330 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, 318 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
331 (unsigned int) trb->link.segment_ptr[0], 319 lower_32_bits(trb->link.segment_ptr),
332 (unsigned int) trb->link.segment_ptr[1], 320 upper_32_bits(trb->link.segment_ptr),
333 (unsigned int) trb->link.intr_target, 321 (unsigned int) trb->link.intr_target,
334 (unsigned int) trb->link.control); 322 (unsigned int) trb->link.control);
335 addr += sizeof(*trb); 323 addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
386 entry = &erst->entries[i]; 374 entry = &erst->entries[i];
387 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", 375 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
388 (unsigned int) addr, 376 (unsigned int) addr,
389 (unsigned int) entry->seg_addr[0], 377 lower_32_bits(entry->seg_addr),
390 (unsigned int) entry->seg_addr[1], 378 upper_32_bits(entry->seg_addr),
391 (unsigned int) entry->seg_size, 379 (unsigned int) entry->seg_size,
392 (unsigned int) entry->rsvd); 380 (unsigned int) entry->rsvd);
393 addr += sizeof(*entry); 381 addr += sizeof(*entry);
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
396 384
397void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) 385void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
398{ 386{
399 u32 val; 387 u64 val;
400 388
401 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); 389 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
402 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val); 390 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
403 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]); 391 lower_32_bits(val));
404 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val); 392 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
393 upper_32_bits(val));
405} 394}
406 395
407void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) 396/* Print the last 32 bytes for 64-byte contexts */
397static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
398{
399 int i;
400 for (i = 0; i < 4; ++i) {
401 xhci_dbg(xhci, "@%p (virt) @%08llx "
402 "(dma) %#08llx - rsvd64[%d]\n",
403 &ctx[4 + i], (unsigned long long)dma,
404 ctx[4 + i], i);
405 dma += 8;
406 }
407}
408
409void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
408{ 410{
409 int i, j;
410 int last_ep_ctx = 31;
411 /* Fields are 32 bits wide, DMA addresses are in bytes */ 411 /* Fields are 32 bits wide, DMA addresses are in bytes */
412 int field_size = 32 / 8; 412 int field_size = 32 / 8;
413 int i;
413 414
414 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", 415 struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
415 &ctx->drop_flags, (unsigned long long)dma, 416 dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
416 ctx->drop_flags); 417 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
417 dma += field_size;
418 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
419 &ctx->add_flags, (unsigned long long)dma,
420 ctx->add_flags);
421 dma += field_size;
422 for (i = 0; i > 6; ++i) {
423 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
424 &ctx->rsvd[i], (unsigned long long)dma,
425 ctx->rsvd[i], i);
426 dma += field_size;
427 }
428 418
429 xhci_dbg(xhci, "Slot Context:\n"); 419 xhci_dbg(xhci, "Slot Context:\n");
430 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", 420 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
431 &ctx->slot.dev_info, 421 &slot_ctx->dev_info,
432 (unsigned long long)dma, ctx->slot.dev_info); 422 (unsigned long long)dma, slot_ctx->dev_info);
433 dma += field_size; 423 dma += field_size;
434 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", 424 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
435 &ctx->slot.dev_info2, 425 &slot_ctx->dev_info2,
436 (unsigned long long)dma, ctx->slot.dev_info2); 426 (unsigned long long)dma, slot_ctx->dev_info2);
437 dma += field_size; 427 dma += field_size;
438 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", 428 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
439 &ctx->slot.tt_info, 429 &slot_ctx->tt_info,
440 (unsigned long long)dma, ctx->slot.tt_info); 430 (unsigned long long)dma, slot_ctx->tt_info);
441 dma += field_size; 431 dma += field_size;
442 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", 432 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
443 &ctx->slot.dev_state, 433 &slot_ctx->dev_state,
444 (unsigned long long)dma, ctx->slot.dev_state); 434 (unsigned long long)dma, slot_ctx->dev_state);
445 dma += field_size; 435 dma += field_size;
446 for (i = 0; i > 4; ++i) { 436 for (i = 0; i < 4; ++i) {
447 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", 437 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
448 &ctx->slot.reserved[i], (unsigned long long)dma, 438 &slot_ctx->reserved[i], (unsigned long long)dma,
449 ctx->slot.reserved[i], i); 439 slot_ctx->reserved[i], i);
450 dma += field_size; 440 dma += field_size;
451 } 441 }
452 442
443 if (csz)
444 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
445}
446
447void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
448 struct xhci_container_ctx *ctx,
449 unsigned int last_ep)
450{
451 int i, j;
452 int last_ep_ctx = 31;
453 /* Fields are 32 bits wide, DMA addresses are in bytes */
454 int field_size = 32 / 8;
455 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
456
453 if (last_ep < 31) 457 if (last_ep < 31)
454 last_ep_ctx = last_ep + 1; 458 last_ep_ctx = last_ep + 1;
455 for (i = 0; i < last_ep_ctx; ++i) { 459 for (i = 0; i < last_ep_ctx; ++i) {
460 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
461 dma_addr_t dma = ctx->dma +
462 ((unsigned long)ep_ctx - (unsigned long)ctx);
463
456 xhci_dbg(xhci, "Endpoint %02d Context:\n", i); 464 xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
457 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", 465 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
458 &ctx->ep[i].ep_info, 466 &ep_ctx->ep_info,
459 (unsigned long long)dma, ctx->ep[i].ep_info); 467 (unsigned long long)dma, ep_ctx->ep_info);
460 dma += field_size; 468 dma += field_size;
461 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", 469 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
462 &ctx->ep[i].ep_info2, 470 &ep_ctx->ep_info2,
463 (unsigned long long)dma, ctx->ep[i].ep_info2); 471 (unsigned long long)dma, ep_ctx->ep_info2);
464 dma += field_size;
465 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
466 &ctx->ep[i].deq[0],
467 (unsigned long long)dma, ctx->ep[i].deq[0]);
468 dma += field_size;
469 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
470 &ctx->ep[i].deq[1],
471 (unsigned long long)dma, ctx->ep[i].deq[1]);
472 dma += field_size; 472 dma += field_size;
473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
474 &ep_ctx->deq,
475 (unsigned long long)dma, ep_ctx->deq);
476 dma += 2*field_size;
473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", 477 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
474 &ctx->ep[i].tx_info, 478 &ep_ctx->tx_info,
475 (unsigned long long)dma, ctx->ep[i].tx_info); 479 (unsigned long long)dma, ep_ctx->tx_info);
476 dma += field_size; 480 dma += field_size;
477 for (j = 0; j < 3; ++j) { 481 for (j = 0; j < 3; ++j) {
478 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", 482 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
479 &ctx->ep[i].reserved[j], 483 &ep_ctx->reserved[j],
480 (unsigned long long)dma, 484 (unsigned long long)dma,
481 ctx->ep[i].reserved[j], j); 485 ep_ctx->reserved[j], j);
486 dma += field_size;
487 }
488
489 if (csz)
490 dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
491 }
492}
493
494void xhci_dbg_ctx(struct xhci_hcd *xhci,
495 struct xhci_container_ctx *ctx,
496 unsigned int last_ep)
497{
498 int i;
499 /* Fields are 32 bits wide, DMA addresses are in bytes */
500 int field_size = 32 / 8;
501 struct xhci_slot_ctx *slot_ctx;
502 dma_addr_t dma = ctx->dma;
503 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
504
505 if (ctx->type == XHCI_CTX_TYPE_INPUT) {
506 struct xhci_input_control_ctx *ctrl_ctx =
507 xhci_get_input_control_ctx(xhci, ctx);
508 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
509 &ctrl_ctx->drop_flags, (unsigned long long)dma,
510 ctrl_ctx->drop_flags);
511 dma += field_size;
512 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
513 &ctrl_ctx->add_flags, (unsigned long long)dma,
514 ctrl_ctx->add_flags);
515 dma += field_size;
516 for (i = 0; i < 6; ++i) {
517 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
518 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
519 ctrl_ctx->rsvd2[i], i);
482 dma += field_size; 520 dma += field_size;
483 } 521 }
522
523 if (csz)
524 dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
484 } 525 }
526
527 slot_ctx = xhci_get_slot_ctx(xhci, ctx);
528 xhci_dbg_slot_ctx(xhci, ctx);
529 xhci_dbg_ep_ctx(xhci, ctx, last_ep);
485} 530}
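Throughout xhci-dbg.c the two-element u32 arrays used for 64-bit registers and pointers become single u64 fields, read via xhci_read_64() and printed with lower_32_bits()/upper_32_bits(). The exact helper definitions live in xhci.h and are not part of this diff; the standalone sketch below only illustrates the usual composition, assuming the low word sits first in the register pair:

#include <stdint.h>

static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Compose one 64-bit value from two 32-bit reads (low word, then high word). */
static uint64_t read_64_sketch(const volatile uint32_t *reg)
{
	uint64_t lo = reg[0];
	uint64_t hi = reg[1];

	return lo | (hi << 32);
}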
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index dba3e07ccd09..816c39caca1c 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci)
103 u32 state; 103 u32 state;
104 104
105 state = xhci_readl(xhci, &xhci->op_regs->status); 105 state = xhci_readl(xhci, &xhci->op_regs->status);
106 BUG_ON((state & STS_HALT) == 0); 106 if ((state & STS_HALT) == 0) {
107 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
108 return 0;
109 }
107 110
108 xhci_dbg(xhci, "// Reset the HC\n"); 111 xhci_dbg(xhci, "// Reset the HC\n");
109 command = xhci_readl(xhci, &xhci->op_regs->command); 112 command = xhci_readl(xhci, &xhci->op_regs->command);
@@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd)
226static void xhci_work(struct xhci_hcd *xhci) 229static void xhci_work(struct xhci_hcd *xhci)
227{ 230{
228 u32 temp; 231 u32 temp;
232 u64 temp_64;
229 233
230 /* 234 /*
231 * Clear the op reg interrupt status first, 235 * Clear the op reg interrupt status first,
@@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci)
248 /* FIXME this should be a delayed service routine that clears the EHB */ 252 /* FIXME this should be a delayed service routine that clears the EHB */
249 xhci_handle_event(xhci); 253 xhci_handle_event(xhci);
250 254
251 /* Clear the event handler busy flag; the event ring should be empty. */ 255 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
252 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 256 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
253 xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]); 257 xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
254 /* Flush posted writes -- FIXME is this necessary? */ 258 /* Flush posted writes -- FIXME is this necessary? */
255 xhci_readl(xhci, &xhci->ir_set->irq_pending); 259 xhci_readl(xhci, &xhci->ir_set->irq_pending);
256} 260}
@@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
266{ 270{
267 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 271 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
268 u32 temp, temp2; 272 u32 temp, temp2;
273 union xhci_trb *trb;
269 274
270 spin_lock(&xhci->lock); 275 spin_lock(&xhci->lock);
276 trb = xhci->event_ring->dequeue;
271 /* Check if the xHC generated the interrupt, or the irq is shared */ 277 /* Check if the xHC generated the interrupt, or the irq is shared */
272 temp = xhci_readl(xhci, &xhci->op_regs->status); 278 temp = xhci_readl(xhci, &xhci->op_regs->status);
273 temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); 279 temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
280 if (temp == 0xffffffff && temp2 == 0xffffffff)
281 goto hw_died;
282
274 if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { 283 if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
275 spin_unlock(&xhci->lock); 284 spin_unlock(&xhci->lock);
276 return IRQ_NONE; 285 return IRQ_NONE;
277 } 286 }
287 xhci_dbg(xhci, "op reg status = %08x\n", temp);
288 xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
289 xhci_dbg(xhci, "Event ring dequeue ptr:\n");
290 xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
291 (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
292 lower_32_bits(trb->link.segment_ptr),
293 upper_32_bits(trb->link.segment_ptr),
294 (unsigned int) trb->link.intr_target,
295 (unsigned int) trb->link.control);
278 296
279 if (temp & STS_FATAL) { 297 if (temp & STS_FATAL) {
280 xhci_warn(xhci, "WARNING: Host System Error\n"); 298 xhci_warn(xhci, "WARNING: Host System Error\n");
281 xhci_halt(xhci); 299 xhci_halt(xhci);
300hw_died:
282 xhci_to_hcd(xhci)->state = HC_STATE_HALT; 301 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
283 spin_unlock(&xhci->lock); 302 spin_unlock(&xhci->lock);
284 return -ESHUTDOWN; 303 return -ESHUTDOWN;
@@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg)
295{ 314{
296 unsigned long flags; 315 unsigned long flags;
297 int temp; 316 int temp;
317 u64 temp_64;
298 struct xhci_hcd *xhci = (struct xhci_hcd *) arg; 318 struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
299 int i, j; 319 int i, j;
300 320
@@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg)
311 xhci_dbg(xhci, "Event ring:\n"); 331 xhci_dbg(xhci, "Event ring:\n");
312 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 332 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
313 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); 333 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
314 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 334 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
315 temp &= ERST_PTR_MASK; 335 temp_64 &= ~ERST_PTR_MASK;
316 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); 336 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
317 xhci_dbg(xhci, "Command ring:\n"); 337 xhci_dbg(xhci, "Command ring:\n");
318 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); 338 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
319 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); 339 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg)
356int xhci_run(struct usb_hcd *hcd) 376int xhci_run(struct usb_hcd *hcd)
357{ 377{
358 u32 temp; 378 u32 temp;
379 u64 temp_64;
359 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 380 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
360 void (*doorbell)(struct xhci_hcd *) = NULL; 381 void (*doorbell)(struct xhci_hcd *) = NULL;
361 382
@@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd)
382 add_timer(&xhci->event_ring_timer); 403 add_timer(&xhci->event_ring_timer);
383#endif 404#endif
384 405
406 xhci_dbg(xhci, "Command ring memory map follows:\n");
407 xhci_debug_ring(xhci, xhci->cmd_ring);
408 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
409 xhci_dbg_cmd_ptrs(xhci);
410
411 xhci_dbg(xhci, "ERST memory map follows:\n");
412 xhci_dbg_erst(xhci, &xhci->erst);
413 xhci_dbg(xhci, "Event ring:\n");
414 xhci_debug_ring(xhci, xhci->event_ring);
415 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
416 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
417 temp_64 &= ~ERST_PTR_MASK;
418 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
419
385 xhci_dbg(xhci, "// Set the interrupt modulation register\n"); 420 xhci_dbg(xhci, "// Set the interrupt modulation register\n");
386 temp = xhci_readl(xhci, &xhci->ir_set->irq_control); 421 temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
387 temp &= ~ER_IRQ_INTERVAL_MASK; 422 temp &= ~ER_IRQ_INTERVAL_MASK;
@@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd)
406 if (NUM_TEST_NOOPS > 0) 441 if (NUM_TEST_NOOPS > 0)
407 doorbell = xhci_setup_one_noop(xhci); 442 doorbell = xhci_setup_one_noop(xhci);
408 443
409 xhci_dbg(xhci, "Command ring memory map follows:\n");
410 xhci_debug_ring(xhci, xhci->cmd_ring);
411 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
412 xhci_dbg_cmd_ptrs(xhci);
413
414 xhci_dbg(xhci, "ERST memory map follows:\n");
415 xhci_dbg_erst(xhci, &xhci->erst);
416 xhci_dbg(xhci, "Event ring:\n");
417 xhci_debug_ring(xhci, xhci->event_ring);
418 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
419 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
420 temp &= ERST_PTR_MASK;
421 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
422 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
423 xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
424
425 temp = xhci_readl(xhci, &xhci->op_regs->command); 444 temp = xhci_readl(xhci, &xhci->op_regs->command);
426 temp |= (CMD_RUN); 445 temp |= (CMD_RUN);
427 xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", 446 xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
@@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
601 goto exit; 620 goto exit;
602 } 621 }
603 if (usb_endpoint_xfer_control(&urb->ep->desc)) 622 if (usb_endpoint_xfer_control(&urb->ep->desc))
604 ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb, 623 /* We have a spinlock and interrupts disabled, so we must pass
624 * atomic context to this function, which may allocate memory.
625 */
626 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
605 slot_id, ep_index); 627 slot_id, ep_index);
606 else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) 628 else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
607 ret = xhci_queue_bulk_tx(xhci, mem_flags, urb, 629 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
608 slot_id, ep_index); 630 slot_id, ep_index);
609 else 631 else
610 ret = -EINVAL; 632 ret = -EINVAL;
@@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
661 goto done; 683 goto done;
662 684
663 xhci_dbg(xhci, "Cancel URB %p\n", urb); 685 xhci_dbg(xhci, "Cancel URB %p\n", urb);
686 xhci_dbg(xhci, "Event ring:\n");
687 xhci_debug_ring(xhci, xhci->event_ring);
664 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 688 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
665 ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; 689 ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
690 xhci_dbg(xhci, "Endpoint ring:\n");
691 xhci_debug_ring(xhci, ep_ring);
666 td = (struct xhci_td *) urb->hcpriv; 692 td = (struct xhci_td *) urb->hcpriv;
667 693
668 ep_ring->cancels_pending++; 694 ep_ring->cancels_pending++;
@@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
696 struct usb_host_endpoint *ep) 722 struct usb_host_endpoint *ep)
697{ 723{
698 struct xhci_hcd *xhci; 724 struct xhci_hcd *xhci;
699 struct xhci_device_control *in_ctx; 725 struct xhci_container_ctx *in_ctx, *out_ctx;
726 struct xhci_input_control_ctx *ctrl_ctx;
727 struct xhci_slot_ctx *slot_ctx;
700 unsigned int last_ctx; 728 unsigned int last_ctx;
701 unsigned int ep_index; 729 unsigned int ep_index;
702 struct xhci_ep_ctx *ep_ctx; 730 struct xhci_ep_ctx *ep_ctx;
@@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
724 } 752 }
725 753
726 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 754 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
755 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
756 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
727 ep_index = xhci_get_endpoint_index(&ep->desc); 757 ep_index = xhci_get_endpoint_index(&ep->desc);
728 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; 758 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
729 /* If the HC already knows the endpoint is disabled, 759 /* If the HC already knows the endpoint is disabled,
730 * or the HCD has noted it is disabled, ignore this request 760 * or the HCD has noted it is disabled, ignore this request
731 */ 761 */
732 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || 762 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
733 in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { 763 ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
734 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", 764 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
735 __func__, ep); 765 __func__, ep);
736 return 0; 766 return 0;
737 } 767 }
738 768
739 in_ctx->drop_flags |= drop_flag; 769 ctrl_ctx->drop_flags |= drop_flag;
740 new_drop_flags = in_ctx->drop_flags; 770 new_drop_flags = ctrl_ctx->drop_flags;
741 771
742 in_ctx->add_flags = ~drop_flag; 772 ctrl_ctx->add_flags = ~drop_flag;
743 new_add_flags = in_ctx->add_flags; 773 new_add_flags = ctrl_ctx->add_flags;
744 774
745 last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags); 775 last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
776 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
746 /* Update the last valid endpoint context, if we deleted the last one */ 777 /* Update the last valid endpoint context, if we deleted the last one */
747 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { 778 if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
748 in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 779 slot_ctx->dev_info &= ~LAST_CTX_MASK;
749 in_ctx->slot.dev_info |= LAST_CTX(last_ctx); 780 slot_ctx->dev_info |= LAST_CTX(last_ctx);
750 } 781 }
751 new_slot_info = in_ctx->slot.dev_info; 782 new_slot_info = slot_ctx->dev_info;
752 783
753 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); 784 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
754 785
@@ -778,17 +809,22 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
778 struct usb_host_endpoint *ep) 809 struct usb_host_endpoint *ep)
779{ 810{
780 struct xhci_hcd *xhci; 811 struct xhci_hcd *xhci;
781 struct xhci_device_control *in_ctx; 812 struct xhci_container_ctx *in_ctx, *out_ctx;
782 unsigned int ep_index; 813 unsigned int ep_index;
783 struct xhci_ep_ctx *ep_ctx; 814 struct xhci_ep_ctx *ep_ctx;
815 struct xhci_slot_ctx *slot_ctx;
816 struct xhci_input_control_ctx *ctrl_ctx;
784 u32 added_ctxs; 817 u32 added_ctxs;
785 unsigned int last_ctx; 818 unsigned int last_ctx;
786 u32 new_add_flags, new_drop_flags, new_slot_info; 819 u32 new_add_flags, new_drop_flags, new_slot_info;
787 int ret = 0; 820 int ret = 0;
788 821
789 ret = xhci_check_args(hcd, udev, ep, 1, __func__); 822 ret = xhci_check_args(hcd, udev, ep, 1, __func__);
790 if (ret <= 0) 823 if (ret <= 0) {
824 /* So we won't queue a reset ep command for a root hub */
825 ep->hcpriv = NULL;
791 return ret; 826 return ret;
827 }
792 xhci = hcd_to_xhci(hcd); 828 xhci = hcd_to_xhci(hcd);
793 829
794 added_ctxs = xhci_get_endpoint_flag(&ep->desc); 830 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
@@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
810 } 846 }
811 847
812 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 848 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
849 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
850 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
813 ep_index = xhci_get_endpoint_index(&ep->desc); 851 ep_index = xhci_get_endpoint_index(&ep->desc);
814 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; 852 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
815 /* If the HCD has already noted the endpoint is enabled, 853 /* If the HCD has already noted the endpoint is enabled,
816 * ignore this request. 854 * ignore this request.
817 */ 855 */
818 if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { 856 if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
819 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", 857 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
820 __func__, ep); 858 __func__, ep);
821 return 0; 859 return 0;
@@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
833 return -ENOMEM; 871 return -ENOMEM;
834 } 872 }
835 873
836 in_ctx->add_flags |= added_ctxs; 874 ctrl_ctx->add_flags |= added_ctxs;
837 new_add_flags = in_ctx->add_flags; 875 new_add_flags = ctrl_ctx->add_flags;
838 876
839 /* If xhci_endpoint_disable() was called for this endpoint, but the 877 /* If xhci_endpoint_disable() was called for this endpoint, but the
840 * xHC hasn't been notified yet through the check_bandwidth() call, 878 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
842 * descriptors. We must drop and re-add this endpoint, so we leave the 880 * descriptors. We must drop and re-add this endpoint, so we leave the
843 * drop flags alone. 881 * drop flags alone.
844 */ 882 */
845 new_drop_flags = in_ctx->drop_flags; 883 new_drop_flags = ctrl_ctx->drop_flags;
846 884
885 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
847 /* Update the last valid endpoint context, if we just added one past */ 886 /* Update the last valid endpoint context, if we just added one past */
848 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { 887 if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
849 in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 888 slot_ctx->dev_info &= ~LAST_CTX_MASK;
850 in_ctx->slot.dev_info |= LAST_CTX(last_ctx); 889 slot_ctx->dev_info |= LAST_CTX(last_ctx);
851 } 890 }
852 new_slot_info = in_ctx->slot.dev_info; 891 new_slot_info = slot_ctx->dev_info;
892
893 /* Store the usb_device pointer for later use */
894 ep->hcpriv = udev;
853 895
854 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", 896 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
855 (unsigned int) ep->desc.bEndpointAddress, 897 (unsigned int) ep->desc.bEndpointAddress,
@@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
860 return 0; 902 return 0;
861} 903}
862 904
863static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) 905static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
864{ 906{
907 struct xhci_input_control_ctx *ctrl_ctx;
865 struct xhci_ep_ctx *ep_ctx; 908 struct xhci_ep_ctx *ep_ctx;
909 struct xhci_slot_ctx *slot_ctx;
866 int i; 910 int i;
867 911
868 /* When a device's add flag and drop flag are zero, any subsequent 912 /* When a device's add flag and drop flag are zero, any subsequent
@@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
870 * untouched. Make sure we don't leave any old state in the input 914 * untouched. Make sure we don't leave any old state in the input
871 * endpoint contexts. 915 * endpoint contexts.
872 */ 916 */
873 virt_dev->in_ctx->drop_flags = 0; 917 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
874 virt_dev->in_ctx->add_flags = 0; 918 ctrl_ctx->drop_flags = 0;
875 virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 919 ctrl_ctx->add_flags = 0;
920 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
921 slot_ctx->dev_info &= ~LAST_CTX_MASK;
876 /* Endpoint 0 is always valid */ 922 /* Endpoint 0 is always valid */
877 virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1); 923 slot_ctx->dev_info |= LAST_CTX(1);
878 for (i = 1; i < 31; ++i) { 924 for (i = 1; i < 31; ++i) {
879 ep_ctx = &virt_dev->in_ctx->ep[i]; 925 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
880 ep_ctx->ep_info = 0; 926 ep_ctx->ep_info = 0;
881 ep_ctx->ep_info2 = 0; 927 ep_ctx->ep_info2 = 0;
882 ep_ctx->deq[0] = 0; 928 ep_ctx->deq = 0;
883 ep_ctx->deq[1] = 0;
884 ep_ctx->tx_info = 0; 929 ep_ctx->tx_info = 0;
885 } 930 }
886} 931}
@@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
903 unsigned long flags; 948 unsigned long flags;
904 struct xhci_hcd *xhci; 949 struct xhci_hcd *xhci;
905 struct xhci_virt_device *virt_dev; 950 struct xhci_virt_device *virt_dev;
951 struct xhci_input_control_ctx *ctrl_ctx;
952 struct xhci_slot_ctx *slot_ctx;
906 953
907 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 954 ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
908 if (ret <= 0) 955 if (ret <= 0)
@@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
918 virt_dev = xhci->devs[udev->slot_id]; 965 virt_dev = xhci->devs[udev->slot_id];
919 966
920 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 967 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
921 virt_dev->in_ctx->add_flags |= SLOT_FLAG; 968 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
922 virt_dev->in_ctx->add_flags &= ~EP0_FLAG; 969 ctrl_ctx->add_flags |= SLOT_FLAG;
923 virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG; 970 ctrl_ctx->add_flags &= ~EP0_FLAG;
924 virt_dev->in_ctx->drop_flags &= ~EP0_FLAG; 971 ctrl_ctx->drop_flags &= ~SLOT_FLAG;
972 ctrl_ctx->drop_flags &= ~EP0_FLAG;
925 xhci_dbg(xhci, "New Input Control Context:\n"); 973 xhci_dbg(xhci, "New Input Control Context:\n");
926 xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 974 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
927 LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); 975 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
976 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
928 977
929 spin_lock_irqsave(&xhci->lock, flags); 978 spin_lock_irqsave(&xhci->lock, flags);
930 ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, 979 ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
931 udev->slot_id); 980 udev->slot_id);
932 if (ret < 0) { 981 if (ret < 0) {
933 spin_unlock_irqrestore(&xhci->lock, flags); 982 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
982 } 1031 }
983 1032
984 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 1033 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
985 xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 1034 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
986 LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); 1035 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
987 1036
988 xhci_zero_in_ctx(virt_dev); 1037 xhci_zero_in_ctx(xhci, virt_dev);
989 /* Free any old rings */ 1038 /* Free any old rings */
990 for (i = 1; i < 31; ++i) { 1039 for (i = 1; i < 31; ++i) {
991 if (virt_dev->new_ep_rings[i]) { 1040 if (virt_dev->new_ep_rings[i]) {
@@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1023 virt_dev->new_ep_rings[i] = NULL; 1072 virt_dev->new_ep_rings[i] = NULL;
1024 } 1073 }
1025 } 1074 }
1026 xhci_zero_in_ctx(virt_dev); 1075 xhci_zero_in_ctx(xhci, virt_dev);
1076}
1077
1078/* Deal with stalled endpoints. The core should have sent the control message
1079 * to clear the halt condition. However, we need to make the xHCI hardware
1080 * reset its sequence number, since a device will expect a sequence number of
1081 * zero after the halt condition is cleared.
1082 * Context: in_interrupt
1083 */
1084void xhci_endpoint_reset(struct usb_hcd *hcd,
1085 struct usb_host_endpoint *ep)
1086{
1087 struct xhci_hcd *xhci;
1088 struct usb_device *udev;
1089 unsigned int ep_index;
1090 unsigned long flags;
1091 int ret;
1092 struct xhci_dequeue_state deq_state;
1093 struct xhci_ring *ep_ring;
1094
1095 xhci = hcd_to_xhci(hcd);
1096 udev = (struct usb_device *) ep->hcpriv;
1097 /* Called with a root hub endpoint (or an endpoint that wasn't added
1098 * with xhci_add_endpoint()
1099 */
1100 if (!ep->hcpriv)
1101 return;
1102 ep_index = xhci_get_endpoint_index(&ep->desc);
1103 ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
1104 if (!ep_ring->stopped_td) {
1105 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
1106 ep->desc.bEndpointAddress);
1107 return;
1108 }
1109
1110 xhci_dbg(xhci, "Queueing reset endpoint command\n");
1111 spin_lock_irqsave(&xhci->lock, flags);
1112 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
1113 /*
1114 * Can't change the ring dequeue pointer until it's transitioned to the
1115 * stopped state, which is only upon a successful reset endpoint
1116 * command. Better hope that last command worked!
1117 */
1118 if (!ret) {
1119 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
1120 /* We need to move the HW's dequeue pointer past this TD,
1121 * or it will attempt to resend it on the next doorbell ring.
1122 */
1123 xhci_find_new_dequeue_state(xhci, udev->slot_id,
1124 ep_index, ep_ring->stopped_td, &deq_state);
1125 xhci_dbg(xhci, "Queueing new dequeue state\n");
1126 xhci_queue_new_dequeue_state(xhci, ep_ring,
1127 udev->slot_id,
1128 ep_index, &deq_state);
1129 kfree(ep_ring->stopped_td);
1130 xhci_ring_cmd_db(xhci);
1131 }
1132 spin_unlock_irqrestore(&xhci->lock, flags);
1133
1134 if (ret)
1135 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
1027} 1136}
1028 1137
1029/* 1138/*
@@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1120 struct xhci_virt_device *virt_dev; 1229 struct xhci_virt_device *virt_dev;
1121 int ret = 0; 1230 int ret = 0;
1122 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1231 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1123 u32 temp; 1232 struct xhci_slot_ctx *slot_ctx;
1233 struct xhci_input_control_ctx *ctrl_ctx;
1234 u64 temp_64;
1124 1235
1125 if (!udev->slot_id) { 1236 if (!udev->slot_id) {
1126 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 1237 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1133,10 +1244,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1133 if (!udev->config) 1244 if (!udev->config)
1134 xhci_setup_addressable_virt_dev(xhci, udev); 1245 xhci_setup_addressable_virt_dev(xhci, udev);
1135 /* Otherwise, assume the core has the device configured how it wants */ 1246 /* Otherwise, assume the core has the device configured how it wants */
1247 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1248 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
1136 1249
1137 spin_lock_irqsave(&xhci->lock, flags); 1250 spin_lock_irqsave(&xhci->lock, flags);
1138 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma, 1251 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
1139 udev->slot_id); 1252 udev->slot_id);
1140 if (ret) { 1253 if (ret) {
1141 spin_unlock_irqrestore(&xhci->lock, flags); 1254 spin_unlock_irqrestore(&xhci->lock, flags);
1142 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 1255 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
@@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1176 default: 1289 default:
1177 xhci_err(xhci, "ERROR: unexpected command completion " 1290 xhci_err(xhci, "ERROR: unexpected command completion "
1178 "code 0x%x.\n", virt_dev->cmd_status); 1291 "code 0x%x.\n", virt_dev->cmd_status);
1292 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1293 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
1179 ret = -EINVAL; 1294 ret = -EINVAL;
1180 break; 1295 break;
1181 } 1296 }
1182 if (ret) { 1297 if (ret) {
1183 return ret; 1298 return ret;
1184 } 1299 }
1185 temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); 1300 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
1186 xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); 1301 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
1187 temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); 1302 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
1188 xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
1189 xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
1190 udev->slot_id,
1191 &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
1192 xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
1193 xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
1194 udev->slot_id, 1303 udev->slot_id,
1195 &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], 1304 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
1196 xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); 1305 (unsigned long long)
1306 xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
1197 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 1307 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
1198 (unsigned long long)virt_dev->out_ctx_dma); 1308 (unsigned long long)virt_dev->out_ctx->dma);
1199 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 1309 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1200 xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); 1310 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
1201 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 1311 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1202 xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); 1312 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
1203 /* 1313 /*
1204 * USB core uses address 1 for the roothubs, so we add one to the 1314 * USB core uses address 1 for the roothubs, so we add one to the
1205 * address given back to us by the HC. 1315 * address given back to us by the HC.
1206 */ 1316 */
1207 udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; 1317 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1318 udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
1208 /* Zero the input context control for later use */ 1319 /* Zero the input context control for later use */
1209 virt_dev->in_ctx->add_flags = 0; 1320 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1210 virt_dev->in_ctx->drop_flags = 0; 1321 ctrl_ctx->add_flags = 0;
1211 /* Mirror flags in the output context for future ep enable/disable */ 1322 ctrl_ctx->drop_flags = 0;
1212 virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
1213 virt_dev->out_ctx->drop_flags = 0;
1214 1323
1215 xhci_dbg(xhci, "Device address = %d\n", udev->devnum); 1324 xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
1216 /* XXX Meh, not sure if anyone else but choose_address uses this. */ 1325 /* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void)
1252 /* xhci_device_control has eight fields, and also 1361 /* xhci_device_control has eight fields, and also
1253 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 1362 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
1254 */ 1363 */
1255 BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
1256 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 1364 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
1257 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 1365 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
1258 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 1366 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
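Two behavioural points in the xhci-hcd.c hunks are worth noting: transfer queueing now passes GFP_ATOMIC because it runs under spin_lock_irqsave(), and the interrupt handler bails out to the new hw_died label when both status reads return all-ones, which is what MMIO reads yield once the PCI device has gone away. A standalone sketch of that surprise-removal check:

#include <stdint.h>

/* A removed PCI device reads back as all-ones; two such reads in a row
 * are treated as "controller died" before any further register access.
 */
static int controller_died(uint32_t status, uint32_t irq_pending)
{
	return status == 0xffffffff && irq_pending == 0xffffffff;
}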
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c8a72de1c508..e6b9a1c6002d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
88 return; 88 return;
89 prev->next = next; 89 prev->next = next;
90 if (link_trbs) { 90 if (link_trbs) {
91 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; 91 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
92 92
93 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ 93 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
94 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; 94 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -189,6 +189,63 @@ fail:
189 return 0; 189 return 0;
190} 190}
191 191
192#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
193
194struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
195 int type, gfp_t flags)
196{
197 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
198 if (!ctx)
199 return NULL;
200
201 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
202 ctx->type = type;
203 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
204 if (type == XHCI_CTX_TYPE_INPUT)
205 ctx->size += CTX_SIZE(xhci->hcc_params);
206
207 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
208 memset(ctx->bytes, 0, ctx->size);
209 return ctx;
210}
211
212void xhci_free_container_ctx(struct xhci_hcd *xhci,
213 struct xhci_container_ctx *ctx)
214{
215 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
216 kfree(ctx);
217}
218
219struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
220 struct xhci_container_ctx *ctx)
221{
222 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
223 return (struct xhci_input_control_ctx *)ctx->bytes;
224}
225
226struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
227 struct xhci_container_ctx *ctx)
228{
229 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
230 return (struct xhci_slot_ctx *)ctx->bytes;
231
232 return (struct xhci_slot_ctx *)
233 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
234}
235
236struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
237 struct xhci_container_ctx *ctx,
238 unsigned int ep_index)
239{
240 /* increment ep index by offset of start of ep ctx array */
241 ep_index++;
242 if (ctx->type == XHCI_CTX_TYPE_INPUT)
243 ep_index++;
244
245 return (struct xhci_ep_ctx *)
246 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
247}
248
192/* All the xhci_tds in the ring's TD list should be freed at this point */ 249/* All the xhci_tds in the ring's TD list should be freed at this point */
193void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 250void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
194{ 251{
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
200 return; 257 return;
201 258
202 dev = xhci->devs[slot_id]; 259 dev = xhci->devs[slot_id];
203 xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; 260 xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
204 xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
205 if (!dev) 261 if (!dev)
206 return; 262 return;
207 263
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
210 xhci_ring_free(xhci, dev->ep_rings[i]); 266 xhci_ring_free(xhci, dev->ep_rings[i]);
211 267
212 if (dev->in_ctx) 268 if (dev->in_ctx)
213 dma_pool_free(xhci->device_pool, 269 xhci_free_container_ctx(xhci, dev->in_ctx);
214 dev->in_ctx, dev->in_ctx_dma);
215 if (dev->out_ctx) 270 if (dev->out_ctx)
216 dma_pool_free(xhci->device_pool, 271 xhci_free_container_ctx(xhci, dev->out_ctx);
217 dev->out_ctx, dev->out_ctx_dma); 272
218 kfree(xhci->devs[slot_id]); 273 kfree(xhci->devs[slot_id]);
219 xhci->devs[slot_id] = 0; 274 xhci->devs[slot_id] = 0;
220} 275}
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
222int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 277int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
223 struct usb_device *udev, gfp_t flags) 278 struct usb_device *udev, gfp_t flags)
224{ 279{
225 dma_addr_t dma;
226 struct xhci_virt_device *dev; 280 struct xhci_virt_device *dev;
227 281
228 /* Slot ID 0 is reserved */ 282 /* Slot ID 0 is reserved */
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
236 return 0; 290 return 0;
237 dev = xhci->devs[slot_id]; 291 dev = xhci->devs[slot_id];
238 292
239 /* Allocate the (output) device context that will be used in the HC */ 293 /* Allocate the (output) device context that will be used in the HC. */
240 dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); 294 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
241 if (!dev->out_ctx) 295 if (!dev->out_ctx)
242 goto fail; 296 goto fail;
243 dev->out_ctx_dma = dma; 297
244 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, 298 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
245 (unsigned long long)dma); 299 (unsigned long long)dev->out_ctx->dma);
246 memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
247 300
248 /* Allocate the (input) device context for address device command */ 301 /* Allocate the (input) device context for address device command */
249 dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); 302 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
250 if (!dev->in_ctx) 303 if (!dev->in_ctx)
251 goto fail; 304 goto fail;
252 dev->in_ctx_dma = dma; 305
253 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 306 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
254 (unsigned long long)dma); 307 (unsigned long long)dev->in_ctx->dma);
255 memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
256 308
257 /* Allocate endpoint 0 ring */ 309 /* Allocate endpoint 0 ring */
258 dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); 310 dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
261 313
262 init_completion(&dev->cmd_completion); 314 init_completion(&dev->cmd_completion);
263 315
264 /* 316 /* Point to output device context in dcbaa. */
265 * Point to output device context in dcbaa; skip the output control 317 xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
266 * context, which is eight 32 bit fields (or 32 bytes long)
267 */
268 xhci->dcbaa->dev_context_ptrs[2*slot_id] =
269 (u32) dev->out_ctx_dma + (32);
270 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 318 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
271 slot_id, 319 slot_id,
272 &xhci->dcbaa->dev_context_ptrs[2*slot_id], 320 &xhci->dcbaa->dev_context_ptrs[slot_id],
273 (unsigned long long)dev->out_ctx_dma); 321 (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
274 xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
275 322
276 return 1; 323 return 1;
277fail: 324fail:
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
285 struct xhci_virt_device *dev; 332 struct xhci_virt_device *dev;
286 struct xhci_ep_ctx *ep0_ctx; 333 struct xhci_ep_ctx *ep0_ctx;
287 struct usb_device *top_dev; 334 struct usb_device *top_dev;
335 struct xhci_slot_ctx *slot_ctx;
336 struct xhci_input_control_ctx *ctrl_ctx;
288 337
289 dev = xhci->devs[udev->slot_id]; 338 dev = xhci->devs[udev->slot_id];
290 /* Slot ID 0 is reserved */ 339 /* Slot ID 0 is reserved */
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
293 udev->slot_id); 342 udev->slot_id);
294 return -EINVAL; 343 return -EINVAL;
295 } 344 }
296 ep0_ctx = &dev->in_ctx->ep[0]; 345 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
346 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
347 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
297 348
298 /* 2) New slot context and endpoint 0 context are valid*/ 349 /* 2) New slot context and endpoint 0 context are valid*/
299 dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; 350 ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
300 351
301 /* 3) Only the control endpoint is valid - one endpoint context */ 352 /* 3) Only the control endpoint is valid - one endpoint context */
302 dev->in_ctx->slot.dev_info |= LAST_CTX(1); 353 slot_ctx->dev_info |= LAST_CTX(1);
303 354
304 switch (udev->speed) { 355 switch (udev->speed) {
305 case USB_SPEED_SUPER: 356 case USB_SPEED_SUPER:
306 dev->in_ctx->slot.dev_info |= (u32) udev->route; 357 slot_ctx->dev_info |= (u32) udev->route;
307 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; 358 slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
308 break; 359 break;
309 case USB_SPEED_HIGH: 360 case USB_SPEED_HIGH:
310 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; 361 slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
311 break; 362 break;
312 case USB_SPEED_FULL: 363 case USB_SPEED_FULL:
313 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; 364 slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
314 break; 365 break;
315 case USB_SPEED_LOW: 366 case USB_SPEED_LOW:
316 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; 367 slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
317 break; 368 break;
318 case USB_SPEED_VARIABLE: 369 case USB_SPEED_VARIABLE:
319 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 370 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
327 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 378 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
328 top_dev = top_dev->parent) 379 top_dev = top_dev->parent)
329 /* Found device below root hub */; 380 /* Found device below root hub */;
330 dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 381 slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
331 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); 382 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
332 383
333 /* Is this a LS/FS device under a HS hub? */ 384 /* Is this a LS/FS device under a HS hub? */
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
337 */ 388 */
338 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && 389 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
339 udev->tt) { 390 udev->tt) {
340 dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; 391 slot_ctx->tt_info = udev->tt->hub->slot_id;
341 dev->in_ctx->slot.tt_info |= udev->ttport << 8; 392 slot_ctx->tt_info |= udev->ttport << 8;
342 } 393 }
343 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 394 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
344 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 395 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
360 ep0_ctx->ep_info2 |= MAX_BURST(0); 411 ep0_ctx->ep_info2 |= MAX_BURST(0);
361 ep0_ctx->ep_info2 |= ERROR_COUNT(3); 412 ep0_ctx->ep_info2 |= ERROR_COUNT(3);
362 413
363 ep0_ctx->deq[0] = 414 ep0_ctx->deq =
364 dev->ep_rings[0]->first_seg->dma; 415 dev->ep_rings[0]->first_seg->dma;
365 ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; 416 ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
366 ep0_ctx->deq[1] = 0;
367 417
368 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 418 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
369 419
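The endpoint context's dequeue field is likewise a single 64-bit value now: TRBs are 16-byte aligned, so the low four bits of the ring address are free, and bit 0 carries the dequeue cycle state (DCS). That is also why xhci_find_new_dequeue_state() later recovers the cycle with 0x1 & ep_ctx->deq. A small illustrative sketch of the composition (the local deq variable is not in the patch):

	u64 deq;

	deq = dev->ep_rings[0]->first_seg->dma;	/* bits 63:4 - TR dequeue pointer */
	deq |= dev->ep_rings[0]->cycle_state;	/* bit 0     - dequeue cycle state */
	ep0_ctx->deq = deq;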
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
470 unsigned int max_burst; 520 unsigned int max_burst;
471 521
472 ep_index = xhci_get_endpoint_index(&ep->desc); 522 ep_index = xhci_get_endpoint_index(&ep->desc);
473 ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 523 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
474 524
475 /* Set up the endpoint ring */ 525 /* Set up the endpoint ring */
476 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); 526 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
477 if (!virt_dev->new_ep_rings[ep_index]) 527 if (!virt_dev->new_ep_rings[ep_index])
478 return -ENOMEM; 528 return -ENOMEM;
479 ep_ring = virt_dev->new_ep_rings[ep_index]; 529 ep_ring = virt_dev->new_ep_rings[ep_index];
480 ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; 530 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
481 ep_ctx->deq[1] = 0;
482 531
483 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 532 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
484 533
485 /* FIXME dig Mult and streams info out of ep companion desc */ 534 /* FIXME dig Mult and streams info out of ep companion desc */
486 535
487 /* Allow 3 retries for everything but isoc */ 536 /* Allow 3 retries for everything but isoc;
537 * error count = 0 means infinite retries.
538 */
488 if (!usb_endpoint_xfer_isoc(&ep->desc)) 539 if (!usb_endpoint_xfer_isoc(&ep->desc))
489 ep_ctx->ep_info2 = ERROR_COUNT(3); 540 ep_ctx->ep_info2 = ERROR_COUNT(3);
490 else 541 else
491 ep_ctx->ep_info2 = ERROR_COUNT(0); 542 ep_ctx->ep_info2 = ERROR_COUNT(1);
492 543
493 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); 544 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
494 545
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
498 max_packet = ep->desc.wMaxPacketSize; 549 max_packet = ep->desc.wMaxPacketSize;
499 ep_ctx->ep_info2 |= MAX_PACKET(max_packet); 550 ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
500 /* dig out max burst from ep companion desc */ 551 /* dig out max burst from ep companion desc */
501 max_packet = ep->ss_ep_comp->desc.bMaxBurst; 552 if (!ep->ss_ep_comp) {
553 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
554 max_packet = 0;
555 } else {
556 max_packet = ep->ss_ep_comp->desc.bMaxBurst;
557 }
502 ep_ctx->ep_info2 |= MAX_BURST(max_packet); 558 ep_ctx->ep_info2 |= MAX_BURST(max_packet);
503 break; 559 break;
504 case USB_SPEED_HIGH: 560 case USB_SPEED_HIGH:
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
531 struct xhci_ep_ctx *ep_ctx; 587 struct xhci_ep_ctx *ep_ctx;
532 588
533 ep_index = xhci_get_endpoint_index(&ep->desc); 589 ep_index = xhci_get_endpoint_index(&ep->desc);
534 ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 590 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
535 591
536 ep_ctx->ep_info = 0; 592 ep_ctx->ep_info = 0;
537 ep_ctx->ep_info2 = 0; 593 ep_ctx->ep_info2 = 0;
538 ep_ctx->deq[0] = 0; 594 ep_ctx->deq = 0;
539 ep_ctx->deq[1] = 0;
540 ep_ctx->tx_info = 0; 595 ep_ctx->tx_info = 0;
541 /* Don't free the endpoint ring until the set interface or configuration 596 /* Don't free the endpoint ring until the set interface or configuration
542 * request succeeds. 597 * request succeeds.
543 */ 598 */
544} 599}
545 600
601/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
602static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
603{
604 int i;
605 struct device *dev = xhci_to_hcd(xhci)->self.controller;
606 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
607
608 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
609
610 if (!num_sp)
611 return 0;
612
613 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
614 if (!xhci->scratchpad)
615 goto fail_sp;
616
617 xhci->scratchpad->sp_array =
618 pci_alloc_consistent(to_pci_dev(dev),
619 num_sp * sizeof(u64),
620 &xhci->scratchpad->sp_dma);
621 if (!xhci->scratchpad->sp_array)
622 goto fail_sp2;
623
624 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
625 if (!xhci->scratchpad->sp_buffers)
626 goto fail_sp3;
627
628 xhci->scratchpad->sp_dma_buffers =
629 kzalloc(sizeof(dma_addr_t) * num_sp, flags);
630
631 if (!xhci->scratchpad->sp_dma_buffers)
632 goto fail_sp4;
633
634 xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
635 for (i = 0; i < num_sp; i++) {
636 dma_addr_t dma;
637 void *buf = pci_alloc_consistent(to_pci_dev(dev),
638 xhci->page_size, &dma);
639 if (!buf)
640 goto fail_sp5;
641
642 xhci->scratchpad->sp_array[i] = dma;
643 xhci->scratchpad->sp_buffers[i] = buf;
644 xhci->scratchpad->sp_dma_buffers[i] = dma;
645 }
646
647 return 0;
648
649 fail_sp5:
650 for (i = i - 1; i >= 0; i--) {
651 pci_free_consistent(to_pci_dev(dev), xhci->page_size,
652 xhci->scratchpad->sp_buffers[i],
653 xhci->scratchpad->sp_dma_buffers[i]);
654 }
655 kfree(xhci->scratchpad->sp_dma_buffers);
656
657 fail_sp4:
658 kfree(xhci->scratchpad->sp_buffers);
659
660 fail_sp3:
661 pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
662 xhci->scratchpad->sp_array,
663 xhci->scratchpad->sp_dma);
664
665 fail_sp2:
666 kfree(xhci->scratchpad);
667 xhci->scratchpad = NULL;
668
669 fail_sp:
670 return -ENOMEM;
671}
672
673static void scratchpad_free(struct xhci_hcd *xhci)
674{
675 int num_sp;
676 int i;
677 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
678
679 if (!xhci->scratchpad)
680 return;
681
682 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
683
684 for (i = 0; i < num_sp; i++) {
685 pci_free_consistent(pdev, xhci->page_size,
686 xhci->scratchpad->sp_buffers[i],
687 xhci->scratchpad->sp_dma_buffers[i]);
688 }
689 kfree(xhci->scratchpad->sp_dma_buffers);
690 kfree(xhci->scratchpad->sp_buffers);
691 pci_free_consistent(pdev, num_sp * sizeof(u64),
692 xhci->scratchpad->sp_array,
693 xhci->scratchpad->sp_dma);
694 kfree(xhci->scratchpad);
695 xhci->scratchpad = NULL;
696}
697
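scratchpad_alloc() hands the controller the private working memory it requested in HCSPARAMS2: DCBAA entry 0 points at an array of 64-bit page addresses, one per scratchpad buffer. A short sketch of the resulting layout, using only the names from the code above:

	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);	/* 0 means none needed */
	/*
	 * After scratchpad_alloc() succeeds:
	 *
	 *   dcbaa->dev_context_ptrs[0] --> sp_array        (num_sp u64 entries)
	 *   sp_array[i]                --> DMA address of one page-sized buffer
	 *   sp_buffers[i], sp_dma_buffers[i] keep the CPU/DMA handles for freeing
	 *
	 * The HC owns the contents of these pages; the driver only allocates
	 * and frees them, symmetrically in scratchpad_free().
	 */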
546void xhci_mem_cleanup(struct xhci_hcd *xhci) 698void xhci_mem_cleanup(struct xhci_hcd *xhci)
547{ 699{
548 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 700 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
551 703
552 /* Free the Event Ring Segment Table and the actual Event Ring */ 704 /* Free the Event Ring Segment Table and the actual Event Ring */
553 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 705 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
554 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); 706 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
555 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); 707 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
556 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
557 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
558 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 708 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
559 if (xhci->erst.entries) 709 if (xhci->erst.entries)
560 pci_free_consistent(pdev, size, 710 pci_free_consistent(pdev, size,
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
566 xhci->event_ring = NULL; 716 xhci->event_ring = NULL;
567 xhci_dbg(xhci, "Freed event ring\n"); 717 xhci_dbg(xhci, "Freed event ring\n");
568 718
569 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); 719 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
570 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
571 if (xhci->cmd_ring) 720 if (xhci->cmd_ring)
572 xhci_ring_free(xhci, xhci->cmd_ring); 721 xhci_ring_free(xhci, xhci->cmd_ring);
573 xhci->cmd_ring = NULL; 722 xhci->cmd_ring = NULL;
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
586 xhci->device_pool = NULL; 735 xhci->device_pool = NULL;
587 xhci_dbg(xhci, "Freed device context pool\n"); 736 xhci_dbg(xhci, "Freed device context pool\n");
588 737
589 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); 738 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
590 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
591 if (xhci->dcbaa) 739 if (xhci->dcbaa)
592 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 740 pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
593 xhci->dcbaa, xhci->dcbaa->dma); 741 xhci->dcbaa, xhci->dcbaa->dma);
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
595 743
596 xhci->page_size = 0; 744 xhci->page_size = 0;
597 xhci->page_shift = 0; 745 xhci->page_shift = 0;
746 scratchpad_free(xhci);
598} 747}
599 748
600int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 749int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
602 dma_addr_t dma; 751 dma_addr_t dma;
603 struct device *dev = xhci_to_hcd(xhci)->self.controller; 752 struct device *dev = xhci_to_hcd(xhci)->self.controller;
604 unsigned int val, val2; 753 unsigned int val, val2;
754 u64 val_64;
605 struct xhci_segment *seg; 755 struct xhci_segment *seg;
606 u32 page_size; 756 u32 page_size;
607 int i; 757 int i;
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
647 xhci->dcbaa->dma = dma; 797 xhci->dcbaa->dma = dma;
648 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 798 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
649 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 799 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
650 xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); 800 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
651 xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
652 801
653 /* 802 /*
654 * Initialize the ring segment pool. The ring must be a contiguous 803 * Initialize the ring segment pool. The ring must be a contiguous
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
658 */ 807 */
659 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 808 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
660 SEGMENT_SIZE, 64, xhci->page_size); 809 SEGMENT_SIZE, 64, xhci->page_size);
810
661 /* See Table 46 and Note on Figure 55 */ 811 /* See Table 46 and Note on Figure 55 */
662 /* FIXME support 64-byte contexts */
663 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 812 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
664 sizeof(struct xhci_device_control), 813 2112, 64, xhci->page_size);
665 64, xhci->page_size);
666 if (!xhci->segment_pool || !xhci->device_pool) 814 if (!xhci->segment_pool || !xhci->device_pool)
667 goto fail; 815 goto fail;
668 816
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
675 (unsigned long long)xhci->cmd_ring->first_seg->dma); 823 (unsigned long long)xhci->cmd_ring->first_seg->dma);
676 824
677 /* Set the address in the Command Ring Control register */ 825 /* Set the address in the Command Ring Control register */
678 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); 826 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
679 val = (val & ~CMD_RING_ADDR_MASK) | 827 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
680 (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | 828 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
681 xhci->cmd_ring->cycle_state; 829 xhci->cmd_ring->cycle_state;
682 xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); 830 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", (unsigned long long) val_64);
683 xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); 831 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
684 xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
685 xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
686 xhci_dbg_cmd_ptrs(xhci); 832 xhci_dbg_cmd_ptrs(xhci);
687 833
688 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 834 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
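The Command Ring Control Register mixes control and status bits into the low bits of the 64-bit ring pointer, so the write above keeps whatever it read in those reserved bits and only replaces the pointer and cycle state. The layout this relies on (mask value assumed to match the xhci.h definition):

	/* CRCR, xHCI spec 5.4.5:
	 *   bit  0    RCS - ring cycle state
	 *   bit  1    CS  - command stop
	 *   bit  2    CA  - command abort
	 *   bit  3    CRR - command ring running (read-only)
	 *   bits 5:4  reserved
	 *   bits 63:6 command ring pointer, 64-byte aligned
	 */
	#define CMD_RING_RSVD_BITS	(0x3f)	/* assumed definition */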
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
722 /* set ring base address and size for each segment table entry */ 868 /* set ring base address and size for each segment table entry */
723 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 869 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
724 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 870 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
725 entry->seg_addr[0] = seg->dma; 871 entry->seg_addr = seg->dma;
726 entry->seg_addr[1] = 0;
727 entry->seg_size = TRBS_PER_SEGMENT; 872 entry->seg_size = TRBS_PER_SEGMENT;
728 entry->rsvd = 0; 873 entry->rsvd = 0;
729 seg = seg->next; 874 seg = seg->next;
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
741 /* set the segment table base address */ 886 /* set the segment table base address */
742 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 887 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
743 (unsigned long long)xhci->erst.erst_dma_addr); 888 (unsigned long long)xhci->erst.erst_dma_addr);
744 val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); 889 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
745 val &= ERST_PTR_MASK; 890 val_64 &= ERST_PTR_MASK;
746 val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); 891 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
747 xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); 892 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
748 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
749 893
750 /* Set the event ring dequeue address */ 894 /* Set the event ring dequeue address */
751 xhci_set_hc_event_deq(xhci); 895 xhci_set_hc_event_deq(xhci);
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
761 for (i = 0; i < MAX_HC_SLOTS; ++i) 905 for (i = 0; i < MAX_HC_SLOTS; ++i)
762 xhci->devs[i] = 0; 906 xhci->devs[i] = 0;
763 907
908 if (scratchpad_alloc(xhci, flags))
909 goto fail;
910
764 return 0; 911 return 0;
912
765fail: 913fail:
766 xhci_warn(xhci, "Couldn't initialize memory\n"); 914 xhci_warn(xhci, "Couldn't initialize memory\n");
767 xhci_mem_cleanup(xhci); 915 xhci_mem_cleanup(xhci);
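All of the 64-bit register traffic above goes through xhci_read_64()/xhci_write_64(), which this series adds to xhci.h (not part of this hunk). A plausible sketch of those helpers, assuming they split each 64-bit MMIO register into two 32-bit accesses with the low dword first:

	static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
			__u64 __iomem *regs)
	{
		__u32 __iomem *ptr = (__u32 __iomem *) regs;
		u64 val_lo = readl(ptr);
		u64 val_hi = readl(ptr + 1);

		return val_lo + (val_hi << 32);
	}

	static inline void xhci_write_64(struct xhci_hcd *xhci, const u64 val,
			__u64 __iomem *regs)
	{
		__u32 __iomem *ptr = (__u32 __iomem *) regs;

		writel(lower_32_bits(val), ptr);	/* low dword first */
		writel(upper_32_bits(val), ptr + 1);
	}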
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1462709e26c0..592fe7e623f7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
117 .free_dev = xhci_free_dev, 117
118 .add_endpoint = xhci_add_endpoint, 118
119 .drop_endpoint = xhci_drop_endpoint, 119
120 .endpoint_reset = xhci_endpoint_reset,
120 .check_bandwidth = xhci_check_bandwidth, 121 .check_bandwidth = xhci_check_bandwidth,
121 .reset_bandwidth = xhci_reset_bandwidth, 122 .reset_bandwidth = xhci_reset_bandwidth,
122 .address_device = xhci_address_device, 123 .address_device = xhci_address_device,
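Adding .endpoint_reset to the hc_driver ops lets usbcore hand halted-endpoint recovery to the xHCI driver instead of only resetting its software toggle state. An approximate sketch of the core-side dispatch this plugs into (the function below condenses usb_hcd_reset_endpoint() from drivers/usb/core/hcd.c and is shown for orientation, not as part of this patch):

	static void reset_endpoint_dispatch_sketch(struct usb_hcd *hcd,
			struct usb_host_endpoint *ep)
	{
		/* When an HCD supplies .endpoint_reset, usbcore calls it and lets
		 * the HCD do the hardware-level work; xHCI issues a Reset Endpoint
		 * command from xhci_endpoint_reset().
		 */
		if (hcd->driver->endpoint_reset)
			hcd->driver->endpoint_reset(hcd, ep);
	}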
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 02d81985c454..aa88a067148b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci,
135static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) 135
136{ 136
137 union xhci_trb *next = ++(ring->dequeue); 137 union xhci_trb *next = ++(ring->dequeue);
138 unsigned long long addr;
138 139
139 ring->deq_updates++; 140 ring->deq_updates++;
140 /* Update the dequeue pointer further if that was a link TRB or we're at 141 /* Update the dequeue pointer further if that was a link TRB or we're at
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
152 ring->dequeue = ring->deq_seg->trbs; 153 ring->dequeue = ring->deq_seg->trbs;
153 next = ring->dequeue; 154 next = ring->dequeue;
154 } 155 }
156 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
157 if (ring == xhci->event_ring)
158 xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
159 else if (ring == xhci->cmd_ring)
160 xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
161 else
162 xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
155} 163}
156 164
157/* 165/*
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
171{ 179{
172 u32 chain; 180 u32 chain;
173 union xhci_trb *next; 181 union xhci_trb *next;
182 unsigned long long addr;
174 183
175 chain = ring->enqueue->generic.field[3] & TRB_CHAIN; 184 chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
176 next = ++(ring->enqueue); 185 next = ++(ring->enqueue);
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
204 ring->enqueue = ring->enq_seg->trbs; 213 ring->enqueue = ring->enq_seg->trbs;
205 next = ring->enqueue; 214 next = ring->enqueue;
206 } 215 }
216 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
217 if (ring == xhci->event_ring)
218 xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
219 else if (ring == xhci->cmd_ring)
220 xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
221 else
222 xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
207} 223}
208 224
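The enqueue/dequeue tracing added here leans on xhci_trb_virt_to_dma(), which turns a TRB's kernel virtual address back into its bus address within a segment. A minimal sketch of that mapping (the real helper, defined earlier in this file, does the same arithmetic with additional range checks):

	static dma_addr_t trb_virt_to_dma_sketch(struct xhci_segment *seg,
			union xhci_trb *trb)
	{
		unsigned long offset;

		if (!seg || !trb || trb < seg->trbs)
			return 0;
		offset = (unsigned long) trb - (unsigned long) seg->trbs;
		if (offset >= TRBS_PER_SEGMENT * sizeof(*trb))
			return 0;
		return seg->dma + offset;	/* segment base + byte offset */
	}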
209/* 225/*
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
237 253
238void xhci_set_hc_event_deq(struct xhci_hcd *xhci) 254void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
239{ 255{
240 u32 temp; 256 u64 temp;
241 dma_addr_t deq; 257 dma_addr_t deq;
242 258
243 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 259 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
246 xhci_warn(xhci, "WARN something wrong with SW event ring " 262 xhci_warn(xhci, "WARN something wrong with SW event ring "
247 "dequeue ptr.\n"); 263 "dequeue ptr.\n");
248 /* Update HC event ring dequeue pointer */ 264 /* Update HC event ring dequeue pointer */
249 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 265 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
250 temp &= ERST_PTR_MASK; 266 temp &= ERST_PTR_MASK;
251 if (!in_interrupt()) 267 /* Don't clear the EHB bit (which is RW1C) because
252 xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); 268 * there might be more events to service.
253 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); 269 */
254 xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, 270 temp &= ~ERST_EHB;
255 &xhci->ir_set->erst_dequeue[0]); 271 xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
272 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
273 &xhci->ir_set->erst_dequeue);
256} 274}
257 275
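The rewritten update preserves the Event Handler Busy bit: EHB is write-1-to-clear and shares the low bits of the register with the dequeue pointer, so it is masked out of the value written back until the driver is really done handling events. Bit layout assumed here (matching the usual xhci.h definitions):

	/* ERST dequeue pointer register, xHCI spec 5.5.2.3.3:
	 *   bits 2:0  DESI - dequeue ERST segment index
	 *   bit  3    EHB  - event handler busy (RW1C)
	 *   bits 63:4 event ring dequeue pointer
	 */
	#define ERST_PTR_MASK	(0xf)		/* assumed definition */
	#define ERST_EHB	(1 << 3)	/* assumed definition */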
258/* Ring the host controller doorbell after placing a command on the ring */ 276/* Ring the host controller doorbell after placing a command on the ring */
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
279 /* Don't ring the doorbell for this endpoint if there are pending 297 /* Don't ring the doorbell for this endpoint if there are pending
280 * cancellations because the we don't want to interrupt processing. 298 * cancellations because the we don't want to interrupt processing.
281 */ 299 */
282 if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) { 300 if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
301 && !(ep_ring->state & EP_HALTED)) {
283 field = xhci_readl(xhci, db_addr) & DB_MASK; 302 field = xhci_readl(xhci, db_addr) & DB_MASK;
284 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); 303 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
285 /* Flush PCI posted writes - FIXME Matthew Wilcox says this 304 /* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg(
316 return cur_seg; 335 return cur_seg;
317} 336}
318 337
319struct dequeue_state {
320 struct xhci_segment *new_deq_seg;
321 union xhci_trb *new_deq_ptr;
322 int new_cycle_state;
323};
324
325/* 338/*
326 * Move the xHC's endpoint ring dequeue pointer past cur_td. 339 * Move the xHC's endpoint ring dequeue pointer past cur_td.
327 * Record the new state of the xHC's endpoint ring dequeue segment, 340 * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -336,24 +349,30 @@ struct dequeue_state {
336 * - Finally we move the dequeue state one TRB further, toggling the cycle bit 349 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
337 * if we've moved it past a link TRB with the toggle cycle bit set. 350 * if we've moved it past a link TRB with the toggle cycle bit set.
338 */ 351 */
339static void find_new_dequeue_state(struct xhci_hcd *xhci, 352void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
340 unsigned int slot_id, unsigned int ep_index, 353 unsigned int slot_id, unsigned int ep_index,
341 struct xhci_td *cur_td, struct dequeue_state *state) 354 struct xhci_td *cur_td, struct xhci_dequeue_state *state)
342{ 355{
343 struct xhci_virt_device *dev = xhci->devs[slot_id]; 356 struct xhci_virt_device *dev = xhci->devs[slot_id];
344 struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; 357 struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
345 struct xhci_generic_trb *trb; 358 struct xhci_generic_trb *trb;
359 struct xhci_ep_ctx *ep_ctx;
360 dma_addr_t addr;
346 361
347 state->new_cycle_state = 0; 362 state->new_cycle_state = 0;
363 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
348 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 364 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
349 ep_ring->stopped_trb, 365 ep_ring->stopped_trb,
350 &state->new_cycle_state); 366 &state->new_cycle_state);
351 if (!state->new_deq_seg) 367 if (!state->new_deq_seg)
352 BUG(); 368 BUG();
353 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 369 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
354 state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; 370 xhci_dbg(xhci, "Finding endpoint context\n");
371 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
372 state->new_cycle_state = 0x1 & ep_ctx->deq;
355 373
356 state->new_deq_ptr = cur_td->last_trb; 374 state->new_deq_ptr = cur_td->last_trb;
375 xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
357 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 376 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
358 state->new_deq_ptr, 377 state->new_deq_ptr,
359 &state->new_cycle_state); 378 &state->new_cycle_state);
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
367 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); 386 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
368 387
369 /* Don't update the ring cycle state for the producer (us). */ 388 /* Don't update the ring cycle state for the producer (us). */
389 xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
390 state->new_deq_seg);
391 addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
392 xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
393 (unsigned long long) addr);
394 xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
370 ep_ring->dequeue = state->new_deq_ptr; 395 ep_ring->dequeue = state->new_deq_ptr;
371 ep_ring->deq_seg = state->new_deq_seg; 396 ep_ring->deq_seg = state->new_deq_seg;
372} 397}
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
416 unsigned int ep_index, struct xhci_segment *deq_seg, 441 unsigned int ep_index, struct xhci_segment *deq_seg,
417 union xhci_trb *deq_ptr, u32 cycle_state); 442 union xhci_trb *deq_ptr, u32 cycle_state);
418 443
444void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
445 struct xhci_ring *ep_ring, unsigned int slot_id,
446 unsigned int ep_index, struct xhci_dequeue_state *deq_state)
447{
448 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
449 "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
450 deq_state->new_deq_seg,
451 (unsigned long long)deq_state->new_deq_seg->dma,
452 deq_state->new_deq_ptr,
453 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
454 deq_state->new_cycle_state);
455 queue_set_tr_deq(xhci, slot_id, ep_index,
456 deq_state->new_deq_seg,
457 deq_state->new_deq_ptr,
458 (u32) deq_state->new_cycle_state);
459 /* Stop the TD queueing code from ringing the doorbell until
460 * this command completes. The HC won't set the dequeue pointer
461 * if the ring is running, and ringing the doorbell starts the
462 * ring running.
463 */
464 ep_ring->state |= SET_DEQ_PENDING;
465 xhci_ring_cmd_db(xhci);
466}
467
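Splitting the dequeue-state logic into xhci_find_new_dequeue_state() and xhci_queue_new_dequeue_state() lets paths other than Stop Endpoint reuse the same recovery sequence. A hypothetical caller, for example an endpoint-reset path, would use only the two functions shown above:

	/* Hypothetical caller: move the xHC's dequeue pointer past a stalled TD. */
	struct xhci_dequeue_state deq_state;

	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, td, &deq_state);
	xhci_queue_new_dequeue_state(xhci, ep_ring, slot_id, ep_index, &deq_state);
	/* The Set TR Dequeue Pointer completion handler clears SET_DEQ_PENDING
	 * and rings the doorbell again.
	 */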
419/* 468/*
420 * When we get a command completion for a Stop Endpoint Command, we need to 469 * When we get a command completion for a Stop Endpoint Command, we need to
421 * unlink any cancelled TDs from the ring. There are two ways to do that: 470 * unlink any cancelled TDs from the ring. There are two ways to do that:
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
436 struct xhci_td *cur_td = 0; 485 struct xhci_td *cur_td = 0;
437 struct xhci_td *last_unlinked_td; 486 struct xhci_td *last_unlinked_td;
438 487
439 struct dequeue_state deq_state; 488 struct xhci_dequeue_state deq_state;
440#ifdef CONFIG_USB_HCD_STAT 489#ifdef CONFIG_USB_HCD_STAT
441 ktime_t stop_time = ktime_get(); 490 ktime_t stop_time = ktime_get();
442#endif 491#endif
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
464 * move the xHC endpoint ring dequeue pointer past this TD. 513 * move the xHC endpoint ring dequeue pointer past this TD.
465 */ 514 */
466 if (cur_td == ep_ring->stopped_td) 515 if (cur_td == ep_ring->stopped_td)
467 find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, 516 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
468 &deq_state); 517 &deq_state);
469 else 518 else
470 td_to_noop(xhci, ep_ring, cur_td); 519 td_to_noop(xhci, ep_ring, cur_td);
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
480 529
481 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 530 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
482 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 531 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
483 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " 532 xhci_queue_new_dequeue_state(xhci, ep_ring,
484 "new deq ptr = %p (0x%llx dma), new cycle = %u\n", 533 slot_id, ep_index, &deq_state);
485 deq_state.new_deq_seg,
486 (unsigned long long)deq_state.new_deq_seg->dma,
487 deq_state.new_deq_ptr,
488 (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
489 deq_state.new_cycle_state);
490 queue_set_tr_deq(xhci, slot_id, ep_index,
491 deq_state.new_deq_seg,
492 deq_state.new_deq_ptr,
493 (u32) deq_state.new_cycle_state);
494 /* Stop the TD queueing code from ringing the doorbell until
495 * this command completes. The HC won't set the dequeue pointer
496 * if the ring is running, and ringing the doorbell starts the
497 * ring running.
498 */
499 ep_ring->state |= SET_DEQ_PENDING;
500 xhci_ring_cmd_db(xhci);
501 } else { 534 } else {
502 /* Otherwise just ring the doorbell to restart the ring */ 535 /* Otherwise just ring the doorbell to restart the ring */
503 ring_ep_doorbell(xhci, slot_id, ep_index); 536 ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
551 unsigned int ep_index; 584 unsigned int ep_index;
552 struct xhci_ring *ep_ring; 585 struct xhci_ring *ep_ring;
553 struct xhci_virt_device *dev; 586 struct xhci_virt_device *dev;
587 struct xhci_ep_ctx *ep_ctx;
588 struct xhci_slot_ctx *slot_ctx;
554 589
555 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 590 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
556 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 591 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
557 dev = xhci->devs[slot_id]; 592 dev = xhci->devs[slot_id];
558 ep_ring = dev->ep_rings[ep_index]; 593 ep_ring = dev->ep_rings[ep_index];
594 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
595 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
559 596
560 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { 597 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
561 unsigned int ep_state; 598 unsigned int ep_state;
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
569 case COMP_CTX_STATE: 606 case COMP_CTX_STATE:
570 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 607 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
571 "to incorrect slot or ep state.\n"); 608 "to incorrect slot or ep state.\n");
572 ep_state = dev->out_ctx->ep[ep_index].ep_info; 609 ep_state = ep_ctx->ep_info;
573 ep_state &= EP_STATE_MASK; 610 ep_state &= EP_STATE_MASK;
574 slot_state = dev->out_ctx->slot.dev_state; 611 slot_state = slot_ctx->dev_state;
575 slot_state = GET_SLOT_STATE(slot_state); 612 slot_state = GET_SLOT_STATE(slot_state);
576 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 613 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
577 slot_state, ep_state); 614 slot_state, ep_state);
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
593 * cancelling URBs, which might not be an error... 630 * cancelling URBs, which might not be an error...
594 */ 631 */
595 } else { 632 } else {
596 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " 633 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
597 "deq[1] = 0x%x.\n", 634 ep_ctx->deq);
598 dev->out_ctx->ep[ep_index].deq[0],
599 dev->out_ctx->ep[ep_index].deq[1]);
600 } 635 }
601 636
602 ep_ring->state &= ~SET_DEQ_PENDING; 637 ep_ring->state &= ~SET_DEQ_PENDING;
603 ring_ep_doorbell(xhci, slot_id, ep_index); 638 ring_ep_doorbell(xhci, slot_id, ep_index);
604} 639}
605 640
641static void handle_reset_ep_completion(struct xhci_hcd *xhci,
642 struct xhci_event_cmd *event,
643 union xhci_trb *trb)
644{
645 int slot_id;
646 unsigned int ep_index;
647
648 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
649 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
650 /* This command will only fail if the endpoint wasn't halted,
651 * but we don't care.
652 */
653 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
654 (unsigned int) GET_COMP_CODE(event->status));
655
656 /* Clear our internal halted state and restart the ring */
657 xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
658 ring_ep_doorbell(xhci, slot_id, ep_index);
659}
606 660
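This completion handler pairs with a Reset Endpoint command submission, xhci_queue_reset_ep(), whose declaration appears at the end of this file. Assuming it follows the same queue_command() pattern as the other pointer-less commands here, and that EP_INDEX_FOR_TRB() exists in xhci.h alongside SLOT_ID_FOR_TRB(), a sketch would be:

	int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
			unsigned int ep_index)
	{
		u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
		u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
		u32 type = TRB_TYPE(TRB_RESET_EP);

		return queue_command(xhci, 0, 0, 0,
				trb_slot_id | trb_ep_index | type);
	}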
607static void handle_cmd_completion(struct xhci_hcd *xhci, 661static void handle_cmd_completion(struct xhci_hcd *xhci,
608 struct xhci_event_cmd *event) 662 struct xhci_event_cmd *event)
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
611 u64 cmd_dma; 665 u64 cmd_dma;
612 dma_addr_t cmd_dequeue_dma; 666 dma_addr_t cmd_dequeue_dma;
613 667
614 cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; 668 cmd_dma = event->cmd_trb;
615 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 669 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
616 xhci->cmd_ring->dequeue); 670 xhci->cmd_ring->dequeue);
617 /* Is the command ring deq ptr out of sync with the deq seg ptr? */ 671 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
653 case TRB_TYPE(TRB_CMD_NOOP): 707 case TRB_TYPE(TRB_CMD_NOOP):
654 ++xhci->noops_handled; 708 ++xhci->noops_handled;
655 break; 709 break;
710 case TRB_TYPE(TRB_RESET_EP):
711 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
712 break;
656 default: 713 default:
657 /* Skip over unknown commands on the event ring */ 714 /* Skip over unknown commands on the event ring */
658 xhci->error_bitmask |= 1 << 6; 715 xhci->error_bitmask |= 1 << 6;
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
756 union xhci_trb *event_trb; 813 union xhci_trb *event_trb;
757 struct urb *urb = 0; 814 struct urb *urb = 0;
758 int status = -EINPROGRESS; 815 int status = -EINPROGRESS;
816 struct xhci_ep_ctx *ep_ctx;
759 817
818 xhci_dbg(xhci, "In %s\n", __func__);
760 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; 819 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
761 if (!xdev) { 820 if (!xdev) {
762 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 821 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
765 824
766 /* Endpoint ID is 1 based, our index is zero based */ 825 /* Endpoint ID is 1 based, our index is zero based */
767 ep_index = TRB_TO_EP_ID(event->flags) - 1; 826 ep_index = TRB_TO_EP_ID(event->flags) - 1;
827 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
768 ep_ring = xdev->ep_rings[ep_index]; 828 ep_ring = xdev->ep_rings[ep_index];
769 if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 829 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
830 if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
770 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); 831 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
771 return -ENODEV; 832 return -ENODEV;
772 } 833 }
773 834
774 event_dma = event->buffer[0]; 835 event_dma = event->buffer;
775 if (event->buffer[1] != 0)
776 xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
777
778 /* This TRB should be in the TD at the head of this ring's TD list */ 836 /* This TRB should be in the TD at the head of this ring's TD list */
837 xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
779 if (list_empty(&ep_ring->td_list)) { 838 if (list_empty(&ep_ring->td_list)) {
780 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", 839 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
781 TRB_TO_SLOT_ID(event->flags), ep_index); 840 TRB_TO_SLOT_ID(event->flags), ep_index);
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
785 urb = NULL; 844 urb = NULL;
786 goto cleanup; 845 goto cleanup;
787 } 846 }
847 xhci_dbg(xhci, "%s - getting list entry\n", __func__);
788 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 848 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
789 849
790 /* Is this a TRB in the currently executing TD? */ 850 /* Is this a TRB in the currently executing TD? */
851 xhci_dbg(xhci, "%s - looking for TD\n", __func__);
791 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 852 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
792 td->last_trb, event_dma); 853 td->last_trb, event_dma);
854 xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
793 if (!event_seg) { 855 if (!event_seg) {
794 /* HC is busted, give up! */ 856 /* HC is busted, give up! */
795 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); 857 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
798 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; 860 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
799 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 861 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
800 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); 862 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
801 xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", 863 xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
802 (unsigned int) event->buffer[0]); 864 lower_32_bits(event->buffer));
803 xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", 865 xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
804 (unsigned int) event->buffer[1]); 866 upper_32_bits(event->buffer));
805 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", 867 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
806 (unsigned int) event->transfer_len); 868 (unsigned int) event->transfer_len);
807 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", 869 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
823 break; 885 break;
824 case COMP_STALL: 886 case COMP_STALL:
825 xhci_warn(xhci, "WARN: Stalled endpoint\n"); 887 xhci_warn(xhci, "WARN: Stalled endpoint\n");
888 ep_ring->state |= EP_HALTED;
826 status = -EPIPE; 889 status = -EPIPE;
827 break; 890 break;
828 case COMP_TRB_ERR: 891 case COMP_TRB_ERR:
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
833 xhci_warn(xhci, "WARN: transfer error on endpoint\n"); 896 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
834 status = -EPROTO; 897 status = -EPROTO;
835 break; 898 break;
899 case COMP_BABBLE:
900 xhci_warn(xhci, "WARN: babble error on endpoint\n");
901 status = -EOVERFLOW;
902 break;
836 case COMP_DB_ERR: 903 case COMP_DB_ERR:
837 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 904 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
838 status = -ENOSR; 905 status = -ENOSR;
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
874 if (event_trb != ep_ring->dequeue) { 941 if (event_trb != ep_ring->dequeue) {
875 /* The event was for the status stage */ 942 /* The event was for the status stage */
876 if (event_trb == td->last_trb) { 943 if (event_trb == td->last_trb) {
877 td->urb->actual_length = 944 if (td->urb->actual_length != 0) {
878 td->urb->transfer_buffer_length; 945 /* Don't overwrite a previously set error code */
946 if (status == -EINPROGRESS || status == 0)
947 /* Did we already see a short data stage? */
948 status = -EREMOTEIO;
949 } else {
950 td->urb->actual_length =
951 td->urb->transfer_buffer_length;
952 }
879 } else { 953 } else {
880 /* Maybe the event was for the data stage? */ 954 /* Maybe the event was for the data stage? */
881 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) 955 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
882 /* We didn't stop on a link TRB in the middle */ 956 /* We didn't stop on a link TRB in the middle */
883 td->urb->actual_length = 957 td->urb->actual_length =
884 td->urb->transfer_buffer_length - 958 td->urb->transfer_buffer_length -
885 TRB_LEN(event->transfer_len); 959 TRB_LEN(event->transfer_len);
960 xhci_dbg(xhci, "Waiting for status stage event\n");
961 urb = NULL;
962 goto cleanup;
963 }
886 } 964 }
887 } 965 }
888 } else { 966 } else {
@@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
929 TRB_LEN(event->transfer_len)); 1007 TRB_LEN(event->transfer_len));
930 td->urb->actual_length = 0; 1008 td->urb->actual_length = 0;
931 } 1009 }
932 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1010 /* Don't overwrite a previously set error code */
933 status = -EREMOTEIO; 1011 if (status == -EINPROGRESS) {
934 else 1012 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
935 status = 0; 1013 status = -EREMOTEIO;
1014 else
1015 status = 0;
1016 }
936 } else { 1017 } else {
937 td->urb->actual_length = td->urb->transfer_buffer_length; 1018 td->urb->actual_length = td->urb->transfer_buffer_length;
938 /* Ignore a short packet completion if the 1019 /* Ignore a short packet completion if the
939 * untransferred length was zero. 1020 * untransferred length was zero.
940 */ 1021 */
941 status = 0; 1022 if (status == -EREMOTEIO)
1023 status = 0;
942 } 1024 }
943 } else { 1025 } else {
944 /* Slow path - walk the list, starting from the dequeue 1026 /* Slow path - walk the list, starting from the dequeue
@@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci,
965 TRB_LEN(event->transfer_len); 1047 TRB_LEN(event->transfer_len);
966 } 1048 }
967 } 1049 }
968 /* The Endpoint Stop Command completion will take care of
969 * any stopped TDs. A stopped TD may be restarted, so don't update the
970 * ring dequeue pointer or take this TD off any lists yet.
971 */
972 if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || 1050 if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
973 GET_COMP_CODE(event->transfer_len) == COMP_STOP) { 1051 GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
1052 /* The Endpoint Stop Command completion will take care of any
1053 * stopped TDs. A stopped TD may be restarted, so don't update
1054 * the ring dequeue pointer or take this TD off any lists yet.
1055 */
974 ep_ring->stopped_td = td; 1056 ep_ring->stopped_td = td;
975 ep_ring->stopped_trb = event_trb; 1057 ep_ring->stopped_trb = event_trb;
976 } else { 1058 } else {
977 /* Update ring dequeue pointer */ 1059 if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
978 while (ep_ring->dequeue != td->last_trb) 1060 /* The transfer is completed from the driver's
1061 * perspective, but we need to issue a set dequeue
1062 * command for this stalled endpoint to move the dequeue
1063 * pointer past the TD. We can't do that here because
1064 * the halt condition must be cleared first.
1065 */
1066 ep_ring->stopped_td = td;
1067 ep_ring->stopped_trb = event_trb;
1068 } else {
1069 /* Update ring dequeue pointer */
1070 while (ep_ring->dequeue != td->last_trb)
1071 inc_deq(xhci, ep_ring, false);
979 inc_deq(xhci, ep_ring, false); 1072 inc_deq(xhci, ep_ring, false);
980 inc_deq(xhci, ep_ring, false); 1073 }
981 1074
982 /* Clean up the endpoint's TD list */ 1075 /* Clean up the endpoint's TD list */
983 urb = td->urb; 1076 urb = td->urb;
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
987 list_del(&td->cancelled_td_list); 1080 list_del(&td->cancelled_td_list);
988 ep_ring->cancels_pending--; 1081 ep_ring->cancels_pending--;
989 } 1082 }
990 kfree(td); 1083 /* Leave the TD around for the reset endpoint function to use */
1084 if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
1085 kfree(td);
1086 }
991 urb->hcpriv = NULL; 1087 urb->hcpriv = NULL;
992 } 1088 }
993cleanup: 1089cleanup:
@@ -997,6 +1093,8 @@ cleanup:
997 /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */ 1093
998 if (urb) { 1094 if (urb) {
999 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); 1095 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
1096 xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
1097 urb, td->urb->actual_length, status);
1000 spin_unlock(&xhci->lock); 1098 spin_unlock(&xhci->lock);
1001 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); 1099 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
1002 spin_lock(&xhci->lock); 1100 spin_lock(&xhci->lock);
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
1014 int update_ptrs = 1; 1112 int update_ptrs = 1;
1015 int ret; 1113 int ret;
1016 1114
1115 xhci_dbg(xhci, "In %s\n", __func__);
1017 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 1116 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
1018 xhci->error_bitmask |= 1 << 1; 1117 xhci->error_bitmask |= 1 << 1;
1019 return; 1118 return;
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci)
1026 xhci->error_bitmask |= 1 << 2; 1125 xhci->error_bitmask |= 1 << 2;
1027 return; 1126 return;
1028 } 1127 }
1128 xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
1029 1129
1030 /* FIXME: Handle more event types. */ 1130 /* FIXME: Handle more event types. */
1031 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { 1131 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
1032 case TRB_TYPE(TRB_COMPLETION): 1132 case TRB_TYPE(TRB_COMPLETION):
1133 xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
1033 handle_cmd_completion(xhci, &event->event_cmd); 1134 handle_cmd_completion(xhci, &event->event_cmd);
1135 xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
1034 break; 1136 break;
1035 case TRB_TYPE(TRB_PORT_STATUS): 1137 case TRB_TYPE(TRB_PORT_STATUS):
1138 xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
1036 handle_port_status(xhci, event); 1139 handle_port_status(xhci, event);
1140 xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
1037 update_ptrs = 0; 1141 update_ptrs = 0;
1038 break; 1142 break;
1039 case TRB_TYPE(TRB_TRANSFER): 1143 case TRB_TYPE(TRB_TRANSFER):
1144 xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
1040 ret = handle_tx_event(xhci, &event->trans_event); 1145 ret = handle_tx_event(xhci, &event->trans_event);
1146 xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
1041 if (ret < 0) 1147 if (ret < 0)
1042 xhci->error_bitmask |= 1 << 9; 1148 xhci->error_bitmask |= 1 << 9;
1043 else 1149 else
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
1093 */ 1199 */
1094 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 1200 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
1095 return -ENOENT; 1201 return -ENOENT;
1096 case EP_STATE_HALTED:
1097 case EP_STATE_ERROR: 1202 case EP_STATE_ERROR:
1098 xhci_warn(xhci, "WARN waiting for halt or error on ep " 1203 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
1099 "to be cleared\n");
1100 /* FIXME event handling code for error needs to clear it */ 1204 /* FIXME event handling code for error needs to clear it */
1101 /* XXX not sure if this should be -ENOENT or not */ 1205 /* XXX not sure if this should be -ENOENT or not */
1102 return -EINVAL; 1206 return -EINVAL;
1207 case EP_STATE_HALTED:
1208 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
1103 case EP_STATE_STOPPED: 1209 case EP_STATE_STOPPED:
1104 case EP_STATE_RUNNING: 1210 case EP_STATE_RUNNING:
1105 break; 1211 break;
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
1128 gfp_t mem_flags) 1234 gfp_t mem_flags)
1129{ 1235{
1130 int ret; 1236 int ret;
1131 1237 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1132 ret = prepare_ring(xhci, xdev->ep_rings[ep_index], 1238 ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
1133 xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, 1239 ep_ctx->ep_info & EP_STATE_MASK,
1134 num_trbs, mem_flags); 1240 num_trbs, mem_flags);
1135 if (ret) 1241 if (ret)
1136 return ret; 1242 return ret;
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1285 /* Queue the first TRB, even if it's zero-length */ 1391 /* Queue the first TRB, even if it's zero-length */
1286 do { 1392 do {
1287 u32 field = 0; 1393 u32 field = 0;
1394 u32 length_field = 0;
1288 1395
1289 /* Don't change the cycle bit of the first TRB until later */ 1396 /* Don't change the cycle bit of the first TRB until later */
1290 if (first_trb) 1397 if (first_trb)
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1314 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 1421 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1315 (unsigned int) addr + trb_buff_len); 1422 (unsigned int) addr + trb_buff_len);
1316 } 1423 }
1424 length_field = TRB_LEN(trb_buff_len) |
1425 TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1426 TRB_INTR_TARGET(0);
1317 queue_trb(xhci, ep_ring, false, 1427 queue_trb(xhci, ep_ring, false,
1318 (u32) addr, 1428 lower_32_bits(addr),
1319 (u32) ((u64) addr >> 32), 1429 upper_32_bits(addr),
1320 TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), 1430 length_field,
1321 /* We always want to know if the TRB was short, 1431 /* We always want to know if the TRB was short,
1322 * or we won't get an event when it completes. 1432 * or we won't get an event when it completes.
1323 * (Unless we use event data TRBs, which are a 1433 * (Unless we use event data TRBs, which are a
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1365 struct xhci_generic_trb *start_trb; 1475 struct xhci_generic_trb *start_trb;
1366 bool first_trb; 1476 bool first_trb;
1367 int start_cycle; 1477 int start_cycle;
1368 u32 field; 1478 u32 field, length_field;
1369 1479
1370 int running_total, trb_buff_len, ret; 1480 int running_total, trb_buff_len, ret;
1371 u64 addr; 1481 u64 addr;
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1443 td->last_trb = ep_ring->enqueue; 1553 td->last_trb = ep_ring->enqueue;
1444 field |= TRB_IOC; 1554 field |= TRB_IOC;
1445 } 1555 }
1556 length_field = TRB_LEN(trb_buff_len) |
1557 TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1558 TRB_INTR_TARGET(0);
1446 queue_trb(xhci, ep_ring, false, 1559 queue_trb(xhci, ep_ring, false,
1447 (u32) addr, 1560 lower_32_bits(addr),
1448 (u32) ((u64) addr >> 32), 1561 upper_32_bits(addr),
1449 TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), 1562 length_field,
1450 /* We always want to know if the TRB was short, 1563 /* We always want to know if the TRB was short,
1451 * or we won't get an event when it completes. 1564 * or we won't get an event when it completes.
1452 * (Unless we use event data TRBs, which are a 1565 * (Unless we use event data TRBs, which are a
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1478 struct usb_ctrlrequest *setup; 1591 struct usb_ctrlrequest *setup;
1479 struct xhci_generic_trb *start_trb; 1592 struct xhci_generic_trb *start_trb;
1480 int start_cycle; 1593 int start_cycle;
1481 u32 field; 1594 u32 field, length_field;
1482 struct xhci_td *td; 1595 struct xhci_td *td;
1483 1596
1484 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; 1597 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1528 1641
1529 /* If there's data, queue data TRBs */ 1642 /* If there's data, queue data TRBs */
1530 field = 0; 1643 field = 0;
1644 length_field = TRB_LEN(urb->transfer_buffer_length) |
1645 TD_REMAINDER(urb->transfer_buffer_length) |
1646 TRB_INTR_TARGET(0);
1531 if (urb->transfer_buffer_length > 0) { 1647 if (urb->transfer_buffer_length > 0) {
1532 if (setup->bRequestType & USB_DIR_IN) 1648 if (setup->bRequestType & USB_DIR_IN)
1533 field |= TRB_DIR_IN; 1649 field |= TRB_DIR_IN;
1534 queue_trb(xhci, ep_ring, false, 1650 queue_trb(xhci, ep_ring, false,
1535 lower_32_bits(urb->transfer_dma), 1651 lower_32_bits(urb->transfer_dma),
1536 upper_32_bits(urb->transfer_dma), 1652 upper_32_bits(urb->transfer_dma),
1537 TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), 1653 length_field,
1538 /* Event on short tx */ 1654 /* Event on short tx */
1539 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); 1655 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
1540 } 1656 }
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
1603int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 1719int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1604 u32 slot_id) 1720 u32 slot_id)
1605{ 1721{
1606 return queue_command(xhci, in_ctx_ptr, 0, 0, 1722 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1723 upper_32_bits(in_ctx_ptr), 0,
1607 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); 1724 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
1608} 1725}
1609 1726
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1611int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 1728int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1612 u32 slot_id) 1729 u32 slot_id)
1613{ 1730{
1614 return queue_command(xhci, in_ctx_ptr, 0, 0, 1731 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1732 upper_32_bits(in_ctx_ptr), 0,
1615 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); 1733 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
1616} 1734}
1617 1735
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
1639 u32 type = TRB_TYPE(TRB_SET_DEQ); 1757 u32 type = TRB_TYPE(TRB_SET_DEQ);
1640 1758
1641 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); 1759 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
1642 if (addr == 0) 1760 if (addr == 0) {
1643 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 1761 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
1644 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", 1762 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
1645 deq_seg, deq_ptr); 1763 deq_seg, deq_ptr);
1646 return queue_command(xhci, (u32) addr | cycle_state, 0, 0, 1764 return 0;
1765 }
1766 return queue_command(xhci, lower_32_bits(addr) | cycle_state,
1767 upper_32_bits(addr), 0,
1647 trb_slot_id | trb_ep_index | type); 1768 trb_slot_id | trb_ep_index | type);
1648} 1769}
1770
1771int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
1772 unsigned int ep_index)
1773{
1774 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1775 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1776 u32 type = TRB_TYPE(TRB_RESET_EP);
1777
1778 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
1779}
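The xhci-ring.c hunks above consistently replace the old open-coded TRB_LEN(...) | TRB_INTR_TARGET(0) word with a length_field that also carries the TD remainder, and split 64-bit DMA addresses across two TRB words with lower_32_bits()/upper_32_bits() instead of casting. A minimal, self-contained sketch of that packing follows; the field positions and the sample values are illustrative stand-ins, not the exact xHCI layout defined in xhci.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the TRB_LEN / TD_REMAINDER / TRB_INTR_TARGET
 * style of packing used above; the real masks live in xhci.h. */
#define TRB_LEN(n)         ((uint32_t)(n) & 0x1ffff)
#define TD_REMAINDER(n)    ((((uint32_t)(n) >> 10) & 0x1f) << 17)
#define TRB_INTR_TARGET(n) (((uint32_t)(n) & 0x3ff) << 22)

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t dma = 0x0000000123456000ULL;            /* assumed buffer address */
	unsigned int total = 4096, running_total = 1024, trb_buff_len = 1024;

	/* one field now carries length, remaining TD bytes and interrupter target */
	uint32_t length_field = TRB_LEN(trb_buff_len) |
				TD_REMAINDER(total - running_total) |
				TRB_INTR_TARGET(0);

	/* 64-bit pointer split across the first two TRB words, low word first */
	printf("trb word0=%08x word1=%08x word2=%08x\n",
	       lower_32(dma), upper_32(dma), length_field);
	return 0;
}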
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8936eeb5588b..d31d32206ba3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/timer.h> 27#include <linux/timer.h>
28#include <linux/kernel.h>
28 29
29#include "../core/hcd.h" 30#include "../core/hcd.h"
30/* Code sharing between pci-quirks and xhci hcd */ 31/* Code sharing between pci-quirks and xhci hcd */
@@ -42,14 +43,6 @@
42 * xHCI register interface. 43 * xHCI register interface.
43 * This corresponds to the eXtensible Host Controller Interface (xHCI) 44 * This corresponds to the eXtensible Host Controller Interface (xHCI)
44 * Revision 0.95 specification 45 * Revision 0.95 specification
45 *
46 * Registers should always be accessed with double word or quad word accesses.
47 *
48 * Some xHCI implementations may support 64-bit address pointers. Registers
49 * with 64-bit address pointers should be written to with dword accesses by
50 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
51 * xHCI implementations that do not support 64-bit address pointers will ignore
52 * the high dword, and write order is irrelevant.
53 */ 46 */
54 47
55/** 48/**
@@ -96,6 +89,7 @@ struct xhci_cap_regs {
96#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) 89#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
97/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ 90/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
98/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ 91/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
92#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f)
99 93
100/* HCSPARAMS3 - hcs_params3 - bitmasks */ 94/* HCSPARAMS3 - hcs_params3 - bitmasks */
101/* bits 0:7, Max U1 to U0 latency for the roothub ports */ 95/* bits 0:7, Max U1 to U0 latency for the roothub ports */
@@ -166,10 +160,10 @@ struct xhci_op_regs {
166 u32 reserved1; 160 u32 reserved1;
167 u32 reserved2; 161 u32 reserved2;
168 u32 dev_notification; 162 u32 dev_notification;
169 u32 cmd_ring[2]; 163 u64 cmd_ring;
170 /* rsvd: offset 0x20-2F */ 164 /* rsvd: offset 0x20-2F */
171 u32 reserved3[4]; 165 u32 reserved3[4];
172 u32 dcbaa_ptr[2]; 166 u64 dcbaa_ptr;
173 u32 config_reg; 167 u32 config_reg;
174 /* rsvd: offset 0x3C-3FF */ 168 /* rsvd: offset 0x3C-3FF */
175 u32 reserved4[241]; 169 u32 reserved4[241];
@@ -254,7 +248,7 @@ struct xhci_op_regs {
254#define CMD_RING_RUNNING (1 << 3) 248#define CMD_RING_RUNNING (1 << 3)
255/* bits 4:5 reserved and should be preserved */ 249/* bits 4:5 reserved and should be preserved */
256/* Command Ring pointer - bit mask for the lower 32 bits. */ 250/* Command Ring pointer - bit mask for the lower 32 bits. */
257#define CMD_RING_ADDR_MASK (0xffffffc0) 251#define CMD_RING_RSVD_BITS (0x3f)
258 252
259/* CONFIG - Configure Register - config_reg bitmasks */ 253/* CONFIG - Configure Register - config_reg bitmasks */
260/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ 254/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -382,8 +376,8 @@ struct xhci_intr_reg {
382 u32 irq_control; 376 u32 irq_control;
383 u32 erst_size; 377 u32 erst_size;
384 u32 rsvd; 378 u32 rsvd;
385 u32 erst_base[2]; 379 u64 erst_base;
386 u32 erst_dequeue[2]; 380 u64 erst_dequeue;
387}; 381};
388 382
389/* irq_pending bitmasks */ 383/* irq_pending bitmasks */
@@ -453,6 +447,27 @@ struct xhci_doorbell_array {
453 447
454 448
455/** 449/**
450 * struct xhci_container_ctx
 451 * @type: Type of context. Used to calculate offsets to contained contexts.
452 * @size: Size of the context data
453 * @bytes: The raw context data given to HW
454 * @dma: dma address of the bytes
455 *
456 * Represents either a Device or Input context. Holds a pointer to the raw
457 * memory used for the context (bytes) and dma address of it (dma).
458 */
459struct xhci_container_ctx {
460 unsigned type;
461#define XHCI_CTX_TYPE_DEVICE 0x1
462#define XHCI_CTX_TYPE_INPUT 0x2
463
464 int size;
465
466 u8 *bytes;
467 dma_addr_t dma;
468};
469
470/**
456 * struct xhci_slot_ctx 471 * struct xhci_slot_ctx
457 * @dev_info: Route string, device speed, hub info, and last valid endpoint 472 * @dev_info: Route string, device speed, hub info, and last valid endpoint
458 * @dev_info2: Max exit latency for device number, root hub port number 473 * @dev_info2: Max exit latency for device number, root hub port number
@@ -538,7 +553,7 @@ struct xhci_slot_ctx {
538struct xhci_ep_ctx { 553struct xhci_ep_ctx {
539 u32 ep_info; 554 u32 ep_info;
540 u32 ep_info2; 555 u32 ep_info2;
541 u32 deq[2]; 556 u64 deq;
542 u32 tx_info; 557 u32 tx_info;
543 /* offset 0x14 - 0x1f reserved for HC internal use */ 558 /* offset 0x14 - 0x1f reserved for HC internal use */
544 u32 reserved[3]; 559 u32 reserved[3];
@@ -589,18 +604,16 @@ struct xhci_ep_ctx {
589 604
590 605
591/** 606/**
592 * struct xhci_device_control 607 * struct xhci_input_control_context
593 * Input/Output context; see section 6.2.5. 608 * Input control context; see section 6.2.5.
594 * 609 *
595 * @drop_context: set the bit of the endpoint context you want to disable 610 * @drop_context: set the bit of the endpoint context you want to disable
596 * @add_context: set the bit of the endpoint context you want to enable 611 * @add_context: set the bit of the endpoint context you want to enable
597 */ 612 */
598struct xhci_device_control { 613struct xhci_input_control_ctx {
599 u32 drop_flags; 614 u32 drop_flags;
600 u32 add_flags; 615 u32 add_flags;
601 u32 rsvd[6]; 616 u32 rsvd2[6];
602 struct xhci_slot_ctx slot;
603 struct xhci_ep_ctx ep[31];
604}; 617};
605 618
606/* drop context bitmasks */ 619/* drop context bitmasks */
@@ -608,7 +621,6 @@ struct xhci_device_control {
608/* add context bitmasks */ 621/* add context bitmasks */
609#define ADD_EP(x) (0x1 << x) 622#define ADD_EP(x) (0x1 << x)
610 623
611
612struct xhci_virt_device { 624struct xhci_virt_device {
613 /* 625 /*
614 * Commands to the hardware are passed an "input context" that 626 * Commands to the hardware are passed an "input context" that
@@ -618,11 +630,10 @@ struct xhci_virt_device {
618 * track of input and output contexts separately because 630 * track of input and output contexts separately because
619 * these commands might fail and we don't trust the hardware. 631 * these commands might fail and we don't trust the hardware.
620 */ 632 */
621 struct xhci_device_control *out_ctx; 633 struct xhci_container_ctx *out_ctx;
622 dma_addr_t out_ctx_dma;
623 /* Used for addressing devices and configuration changes */ 634 /* Used for addressing devices and configuration changes */
624 struct xhci_device_control *in_ctx; 635 struct xhci_container_ctx *in_ctx;
625 dma_addr_t in_ctx_dma; 636
626 /* FIXME when stream support is added */ 637 /* FIXME when stream support is added */
627 struct xhci_ring *ep_rings[31]; 638 struct xhci_ring *ep_rings[31];
628 /* Temporary storage in case the configure endpoint command fails and we 639 /* Temporary storage in case the configure endpoint command fails and we
@@ -641,7 +652,7 @@ struct xhci_virt_device {
641 */ 652 */
642struct xhci_device_context_array { 653struct xhci_device_context_array {
643 /* 64-bit device addresses; we only write 32-bit addresses */ 654 /* 64-bit device addresses; we only write 32-bit addresses */
644 u32 dev_context_ptrs[2*MAX_HC_SLOTS]; 655 u64 dev_context_ptrs[MAX_HC_SLOTS];
645 /* private xHCD pointers */ 656 /* private xHCD pointers */
646 dma_addr_t dma; 657 dma_addr_t dma;
647}; 658};
@@ -654,7 +665,7 @@ struct xhci_device_context_array {
654 665
655struct xhci_stream_ctx { 666struct xhci_stream_ctx {
656 /* 64-bit stream ring address, cycle state, and stream type */ 667 /* 64-bit stream ring address, cycle state, and stream type */
657 u32 stream_ring[2]; 668 u64 stream_ring;
658 /* offset 0x14 - 0x1f reserved for HC internal use */ 669 /* offset 0x14 - 0x1f reserved for HC internal use */
659 u32 reserved[2]; 670 u32 reserved[2];
660}; 671};
@@ -662,7 +673,7 @@ struct xhci_stream_ctx {
662 673
663struct xhci_transfer_event { 674struct xhci_transfer_event {
664 /* 64-bit buffer address, or immediate data */ 675 /* 64-bit buffer address, or immediate data */
665 u32 buffer[2]; 676 u64 buffer;
666 u32 transfer_len; 677 u32 transfer_len;
667 /* This field is interpreted differently based on the type of TRB */ 678 /* This field is interpreted differently based on the type of TRB */
668 u32 flags; 679 u32 flags;
@@ -744,7 +755,7 @@ struct xhci_transfer_event {
744 755
745struct xhci_link_trb { 756struct xhci_link_trb {
746 /* 64-bit segment pointer*/ 757 /* 64-bit segment pointer*/
747 u32 segment_ptr[2]; 758 u64 segment_ptr;
748 u32 intr_target; 759 u32 intr_target;
749 u32 control; 760 u32 control;
750}; 761};
@@ -755,7 +766,7 @@ struct xhci_link_trb {
755/* Command completion event TRB */ 766/* Command completion event TRB */
756struct xhci_event_cmd { 767struct xhci_event_cmd {
757 /* Pointer to command TRB, or the value passed by the event data trb */ 768 /* Pointer to command TRB, or the value passed by the event data trb */
758 u32 cmd_trb[2]; 769 u64 cmd_trb;
759 u32 status; 770 u32 status;
760 u32 flags; 771 u32 flags;
761}; 772};
@@ -848,8 +859,8 @@ union xhci_trb {
848#define TRB_CONFIG_EP 12 859#define TRB_CONFIG_EP 12
849/* Evaluate Context Command */ 860/* Evaluate Context Command */
850#define TRB_EVAL_CONTEXT 13 861#define TRB_EVAL_CONTEXT 13
851/* Reset Transfer Ring Command */ 862/* Reset Endpoint Command */
852#define TRB_RESET_RING 14 863#define TRB_RESET_EP 14
853/* Stop Transfer Ring Command */ 864/* Stop Transfer Ring Command */
854#define TRB_STOP_RING 15 865#define TRB_STOP_RING 15
855/* Set Transfer Ring Dequeue Pointer Command */ 866/* Set Transfer Ring Dequeue Pointer Command */
@@ -929,6 +940,7 @@ struct xhci_ring {
929 unsigned int cancels_pending; 940 unsigned int cancels_pending;
930 unsigned int state; 941 unsigned int state;
931#define SET_DEQ_PENDING (1 << 0) 942#define SET_DEQ_PENDING (1 << 0)
943#define EP_HALTED (1 << 1)
932 /* The TRB that was last reported in a stopped endpoint ring */ 944 /* The TRB that was last reported in a stopped endpoint ring */
933 union xhci_trb *stopped_trb; 945 union xhci_trb *stopped_trb;
934 struct xhci_td *stopped_td; 946 struct xhci_td *stopped_td;
@@ -940,9 +952,15 @@ struct xhci_ring {
940 u32 cycle_state; 952 u32 cycle_state;
941}; 953};
942 954
955struct xhci_dequeue_state {
956 struct xhci_segment *new_deq_seg;
957 union xhci_trb *new_deq_ptr;
958 int new_cycle_state;
959};
960
943struct xhci_erst_entry { 961struct xhci_erst_entry {
944 /* 64-bit event ring segment address */ 962 /* 64-bit event ring segment address */
945 u32 seg_addr[2]; 963 u64 seg_addr;
946 u32 seg_size; 964 u32 seg_size;
947 /* Set to zero */ 965 /* Set to zero */
948 u32 rsvd; 966 u32 rsvd;
@@ -957,6 +975,13 @@ struct xhci_erst {
957 unsigned int erst_size; 975 unsigned int erst_size;
958}; 976};
959 977
978struct xhci_scratchpad {
979 u64 *sp_array;
980 dma_addr_t sp_dma;
981 void **sp_buffers;
982 dma_addr_t *sp_dma_buffers;
983};
984
960/* 985/*
961 * Each segment table entry is 4*32bits long. 1K seems like an ok size: 986 * Each segment table entry is 4*32bits long. 1K seems like an ok size:
 962 * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table, 987 * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
@@ -1011,6 +1036,9 @@ struct xhci_hcd {
1011 struct xhci_ring *cmd_ring; 1036 struct xhci_ring *cmd_ring;
1012 struct xhci_ring *event_ring; 1037 struct xhci_ring *event_ring;
1013 struct xhci_erst erst; 1038 struct xhci_erst erst;
1039 /* Scratchpad */
1040 struct xhci_scratchpad *scratchpad;
1041
1014 /* slot enabling and address device helpers */ 1042 /* slot enabling and address device helpers */
1015 struct completion addr_dev; 1043 struct completion addr_dev;
1016 int slot_id; 1044 int slot_id;
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
1071static inline void xhci_writel(struct xhci_hcd *xhci, 1099static inline void xhci_writel(struct xhci_hcd *xhci,
1072 const unsigned int val, __u32 __iomem *regs) 1100 const unsigned int val, __u32 __iomem *regs)
1073{ 1101{
1074 if (!in_interrupt()) 1102 xhci_dbg(xhci,
1075 xhci_dbg(xhci, 1103 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
1076 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", 1104 regs, val);
1077 regs, val);
1078 writel(val, regs); 1105 writel(val, regs);
1079} 1106}
1080 1107
1108/*
1109 * Registers should always be accessed with double word or quad word accesses.
1110 *
1111 * Some xHCI implementations may support 64-bit address pointers. Registers
1112 * with 64-bit address pointers should be written to with dword accesses by
1113 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
1114 * xHCI implementations that do not support 64-bit address pointers will ignore
1115 * the high dword, and write order is irrelevant.
1116 */
1117static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
1118 __u64 __iomem *regs)
1119{
1120 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1121 u64 val_lo = readl(ptr);
1122 u64 val_hi = readl(ptr + 1);
1123 return val_lo + (val_hi << 32);
1124}
1125static inline void xhci_write_64(struct xhci_hcd *xhci,
1126 const u64 val, __u64 __iomem *regs)
1127{
1128 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1129 u32 val_lo = lower_32_bits(val);
1130 u32 val_hi = upper_32_bits(val);
1131
1132 xhci_dbg(xhci,
1133 "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
1134 regs, (long unsigned int) val);
1135 writel(val_lo, ptr);
1136 writel(val_hi, ptr + 1);
1137}
1138
1081/* xHCI debugging */ 1139/* xHCI debugging */
1082void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); 1140void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
1083void xhci_print_registers(struct xhci_hcd *xhci); 1141void xhci_print_registers(struct xhci_hcd *xhci);
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
1090void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); 1148void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
1091void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); 1149void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
1092void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); 1150void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
1093void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); 1151void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
1094 1152
1095/* xHCI memory management */ 1153/* xHCI memory management */
1096void xhci_mem_cleanup(struct xhci_hcd *xhci); 1154void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
1128int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); 1186int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
1129int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); 1187int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1130int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); 1188int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1189void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
1131int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 1190int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1132void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 1191void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1133 1192
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1148 int slot_id, unsigned int ep_index); 1207 int slot_id, unsigned int ep_index);
1149int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 1208int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1150 u32 slot_id); 1209 u32 slot_id);
1210int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
1211 unsigned int ep_index);
1212void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
1213 unsigned int slot_id, unsigned int ep_index,
1214 struct xhci_td *cur_td, struct xhci_dequeue_state *state);
1215void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
1216 struct xhci_ring *ep_ring, unsigned int slot_id,
1217 unsigned int ep_index, struct xhci_dequeue_state *deq_state);
1151 1218
1152/* xHCI roothub code */ 1219/* xHCI roothub code */
1153int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, 1220int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1154 char *buf, u16 wLength); 1221 char *buf, u16 wLength);
1155int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); 1222int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1156 1223
1224/* xHCI contexts */
1225struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1226struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
1227struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
1228
1157#endif /* __LINUX_XHCI_HCD_H */ 1229#endif /* __LINUX_XHCI_HCD_H */
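The xhci.h changes above fold struct xhci_device_control into an opaque struct xhci_container_ctx plus the accessor prototypes added at the end of the header. A hedged sketch of how such accessors can be laid out, assuming a fixed 32-byte context size and the spec ordering (an input context starts with the input control context, followed by the slot context and then the endpoint contexts); the struct names, CTX_SZ and the trimmed layouts below are mock-ups, and the in-tree helpers may compute the size differently:

#include <stdint.h>

/* Sketch only: CTX_SZ is an assumed fixed 32-byte context size, and the
 * struct layouts are trimmed mock-ups of the ones declared in xhci.h. */
#define CTX_SZ 32

struct slot_ctx { uint32_t dev_info, dev_info2, tt_info, dev_state, rsvd[4]; };
struct ep_ctx   { uint32_t ep_info, ep_info2; uint64_t deq; uint32_t tx_info, rsvd[3]; };

enum { CTX_TYPE_DEVICE = 1, CTX_TYPE_INPUT = 2 };

struct container_ctx {
	unsigned type;
	int size;
	uint8_t *bytes;     /* raw context memory handed to the controller */
};

struct slot_ctx *get_slot_ctx(struct container_ctx *ctx)
{
	if (ctx->type == CTX_TYPE_DEVICE)
		return (struct slot_ctx *)ctx->bytes;
	/* input contexts begin with the input control context */
	return (struct slot_ctx *)(ctx->bytes + CTX_SZ);
}

struct ep_ctx *get_ep_ctx(struct container_ctx *ctx, unsigned ep_index)
{
	/* endpoint contexts follow the slot context */
	return (struct ep_ctx *)((uint8_t *)get_slot_ctx(ctx) +
				 CTX_SZ * (ep_index + 1));
}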
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index a68d91a11bee..abe3aa67ed00 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -220,7 +220,7 @@ config USB_IOWARRIOR
220 220
221config USB_TEST 221config USB_TEST
222 tristate "USB testing driver" 222 tristate "USB testing driver"
223 depends on USB && USB_DEVICEFS 223 depends on USB
224 help 224 help
225 This driver is for testing host controller software. It is used 225 This driver is for testing host controller software. It is used
226 with specialized device firmware for regression and stress testing, 226 with specialized device firmware for regression and stress testing,
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 3c5fe5cee05a..90e1a8dedfa9 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/smp_lock.h>
21#include <linux/poll.h> 22#include <linux/poll.h>
22#include <linux/usb/iowarrior.h> 23#include <linux/usb/iowarrior.h>
23 24
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index deb95bb49fd1..d645f3899fe1 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -32,6 +32,7 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/signal.h> 33#include <linux/signal.h>
34#include <linux/sched.h> 34#include <linux/sched.h>
35#include <linux/smp_lock.h>
35#include <linux/errno.h> 36#include <linux/errno.h>
36#include <linux/random.h> 37#include <linux/random.h>
37#include <linux/poll.h> 38#include <linux/poll.h>
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index e0ff9ccd866b..29092b8e59ce 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -16,6 +16,7 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/smp_lock.h>
19#include <linux/errno.h> 20#include <linux/errno.h>
20#include <linux/mutex.h> 21#include <linux/mutex.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f8d9045d668a..0f7a30b7d2d1 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1261,7 +1261,7 @@ static int mon_alloc_buff(struct mon_pgmap *map, int npages)
1261 return -ENOMEM; 1261 return -ENOMEM;
1262 } 1262 }
1263 map[n].ptr = (unsigned char *) vaddr; 1263 map[n].ptr = (unsigned char *) vaddr;
1264 map[n].pg = virt_to_page(vaddr); 1264 map[n].pg = virt_to_page((void *) vaddr);
1265 } 1265 }
1266 return 0; 1266 return 0;
1267} 1267}
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 70073b157f0a..803adcb5ac1d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -12,6 +12,7 @@ config USB_MUSB_HDRC
12 depends on !SUPERH 12 depends on !SUPERH
13 select NOP_USB_XCEIV if ARCH_DAVINCI 13 select NOP_USB_XCEIV if ARCH_DAVINCI
14 select TWL4030_USB if MACH_OMAP_3430SDP 14 select TWL4030_USB if MACH_OMAP_3430SDP
15 select NOP_USB_XCEIV if MACH_OMAP3EVM
15 select USB_OTG_UTILS 16 select USB_OTG_UTILS
16 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 17 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
17 help 18 help
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 8a39de3e6e47..59bf949e589b 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -5,7 +5,6 @@
5 5
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/list.h> 7#include <linux/list.h>
8#include <linux/smp_lock.h>
9#include <linux/errno.h> 8#include <linux/errno.h>
10#include <linux/dmapool.h> 9#include <linux/dmapool.h>
11 10
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 180d7daa4099..e16ff605c458 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -35,13 +35,14 @@
35#include <mach/hardware.h> 35#include <mach/hardware.h>
36#include <mach/memory.h> 36#include <mach/memory.h>
37#include <mach/gpio.h> 37#include <mach/gpio.h>
38#include <mach/cputype.h>
38 39
39#include <asm/mach-types.h> 40#include <asm/mach-types.h>
40 41
41#include "musb_core.h" 42#include "musb_core.h"
42 43
43#ifdef CONFIG_MACH_DAVINCI_EVM 44#ifdef CONFIG_MACH_DAVINCI_EVM
44#define GPIO_nVBUS_DRV 87 45#define GPIO_nVBUS_DRV 144
45#endif 46#endif
46 47
47#include "davinci.h" 48#include "davinci.h"
@@ -329,7 +330,6 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
329 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 330 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
330 WARNING("VBUS error workaround (delay coming)\n"); 331 WARNING("VBUS error workaround (delay coming)\n");
331 } else if (is_host_enabled(musb) && drvvbus) { 332 } else if (is_host_enabled(musb) && drvvbus) {
332 musb->is_active = 1;
333 MUSB_HST_MODE(musb); 333 MUSB_HST_MODE(musb);
334 musb->xceiv->default_a = 1; 334 musb->xceiv->default_a = 1;
335 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 335 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
@@ -343,7 +343,9 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
343 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); 343 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
344 } 344 }
345 345
346 /* NOTE: this must complete poweron within 100 msec */ 346 /* NOTE: this must complete poweron within 100 msec
347 * (OTG_TIME_A_WAIT_VRISE) but we don't check for that.
348 */
347 davinci_source_power(musb, drvvbus, 0); 349 davinci_source_power(musb, drvvbus, 0);
348 DBG(2, "VBUS %s (%s)%s, devctl %02x\n", 350 DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
349 drvvbus ? "on" : "off", 351 drvvbus ? "on" : "off",
@@ -411,6 +413,21 @@ int __init musb_platform_init(struct musb *musb)
411 __raw_writel(phy_ctrl, USB_PHY_CTRL); 413 __raw_writel(phy_ctrl, USB_PHY_CTRL);
412 } 414 }
413 415
416 /* On dm355, the default-A state machine needs DRVVBUS control.
417 * If we won't be a host, there's no need to turn it on.
418 */
419 if (cpu_is_davinci_dm355()) {
420 u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
421
422 if (is_host_enabled(musb)) {
423 deepsleep &= ~DRVVBUS_OVERRIDE;
424 } else {
425 deepsleep &= ~DRVVBUS_FORCE;
426 deepsleep |= DRVVBUS_OVERRIDE;
427 }
428 __raw_writel(deepsleep, DM355_DEEPSLEEP);
429 }
430
414 /* reset the controller */ 431 /* reset the controller */
415 musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); 432 musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
416 433
@@ -437,6 +454,15 @@ int musb_platform_exit(struct musb *musb)
437 if (is_host_enabled(musb)) 454 if (is_host_enabled(musb))
438 del_timer_sync(&otg_workaround); 455 del_timer_sync(&otg_workaround);
439 456
457 /* force VBUS off */
458 if (cpu_is_davinci_dm355()) {
459 u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
460
461 deepsleep &= ~DRVVBUS_FORCE;
462 deepsleep |= DRVVBUS_OVERRIDE;
463 __raw_writel(deepsleep, DM355_DEEPSLEEP);
464 }
465
440 davinci_source_power(musb, 0 /*off*/, 1); 466 davinci_source_power(musb, 0 /*off*/, 1);
441 467
442 /* delay, to avoid problems with module reload */ 468 /* delay, to avoid problems with module reload */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 554a414f65d1..c7c1ca0494cd 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1326 int i; 1326 int i;
1327 1327
1328 /* log core options (read using indexed model) */ 1328 /* log core options (read using indexed model) */
1329 musb_ep_select(mbase, 0);
1330 reg = musb_read_configdata(mbase); 1329 reg = musb_read_configdata(mbase);
1331 1330
1332 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); 1331 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
@@ -1990,7 +1989,7 @@ bad_config:
1990 if (status < 0) 1989 if (status < 0)
1991 goto fail2; 1990 goto fail2;
1992 1991
1993#ifdef CONFIG_USB_OTG 1992#ifdef CONFIG_USB_MUSB_OTG
1994 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); 1993 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
1995#endif 1994#endif
1996 1995
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index f3772ca3b2cf..381d648a36b8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -38,7 +38,6 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/list.h> 39#include <linux/list.h>
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/smp_lock.h>
42#include <linux/errno.h> 41#include <linux/errno.h>
43#include <linux/timer.h> 42#include <linux/timer.h>
44#include <linux/clk.h> 43#include <linux/clk.h>
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 40ed50ecedff..7a6778675ad3 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -407,7 +407,7 @@ stall:
407 csr |= MUSB_RXCSR_P_SENDSTALL 407 csr |= MUSB_RXCSR_P_SENDSTALL
408 | MUSB_RXCSR_FLUSHFIFO 408 | MUSB_RXCSR_FLUSHFIFO
409 | MUSB_RXCSR_CLRDATATOG 409 | MUSB_RXCSR_CLRDATATOG
410 | MUSB_TXCSR_P_WZC_BITS; 410 | MUSB_RXCSR_P_WZC_BITS;
411 musb_writew(regs, MUSB_RXCSR, 411 musb_writew(regs, MUSB_RXCSR,
412 csr); 412 csr);
413 } 413 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 94a2a350a414..cf94511485f2 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -373,7 +373,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
373 musb_save_toggle(qh, is_in, urb); 373 musb_save_toggle(qh, is_in, urb);
374 break; 374 break;
375 case USB_ENDPOINT_XFER_ISOC: 375 case USB_ENDPOINT_XFER_ISOC:
376 if (urb->error_count) 376 if (status == 0 && urb->error_count)
377 status = -EXDEV; 377 status = -EXDEV;
378 break; 378 break;
379 } 379 }
@@ -2235,13 +2235,30 @@ static void musb_h_stop(struct usb_hcd *hcd)
2235static int musb_bus_suspend(struct usb_hcd *hcd) 2235static int musb_bus_suspend(struct usb_hcd *hcd)
2236{ 2236{
2237 struct musb *musb = hcd_to_musb(hcd); 2237 struct musb *musb = hcd_to_musb(hcd);
2238 u8 devctl;
2238 2239
2239 if (musb->xceiv->state == OTG_STATE_A_SUSPEND) 2240 if (!is_host_active(musb))
2240 return 0; 2241 return 0;
2241 2242
2242 if (is_host_active(musb) && musb->is_active) { 2243 switch (musb->xceiv->state) {
2243 WARNING("trying to suspend as %s is_active=%i\n", 2244 case OTG_STATE_A_SUSPEND:
2244 otg_state_string(musb), musb->is_active); 2245 return 0;
2246 case OTG_STATE_A_WAIT_VRISE:
2247 /* ID could be grounded even if there's no device
2248 * on the other end of the cable. NOTE that the
2249 * A_WAIT_VRISE timers are messy with MUSB...
2250 */
2251 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2252 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2253 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2254 break;
2255 default:
2256 break;
2257 }
2258
2259 if (musb->is_active) {
2260 WARNING("trying to suspend as %s while active\n",
2261 otg_state_string(musb));
2245 return -EBUSY; 2262 return -EBUSY;
2246 } else 2263 } else
2247 return 0; 2264 return 0;
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index de3b2f18db44..fbfd3fd9ce1f 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
323 323
324static inline u8 musb_read_configdata(void __iomem *mbase) 324static inline u8 musb_read_configdata(void __iomem *mbase)
325{ 325{
326 musb_writeb(mbase, MUSB_INDEX, 0);
326 return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); 327 return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
327} 328}
328 329
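The musb_regs.h hunk above makes musb_read_configdata() select endpoint 0 itself (by writing MUSB_INDEX) instead of relying on callers, which is why the musb_ep_select() call was dropped from musb_core_init() in the musb_core.c hunk earlier. A small sketch of the indexed-register idiom involved, assuming generic byte accessors; the offsets below are placeholders, not the MUSB ones:

#include <stdint.h>

/* placeholder offsets for the sketch; the real ones are in musb_regs.h */
#define IDX_REG        0x0e    /* index register selecting an endpoint bank */
#define CONFIGDATA_REG 0x1f    /* only valid while endpoint 0 is selected */

static inline uint8_t mmio_read8(volatile uint8_t *base, unsigned off)
{
	return base[off];
}

static inline void mmio_write8(volatile uint8_t *base, unsigned off, uint8_t v)
{
	base[off] = v;
}

/* read CONFIGDATA safely: select bank 0 first, then read the banked register */
uint8_t read_configdata(volatile uint8_t *base)
{
	mmio_write8(base, IDX_REG, 0);
	return mmio_read8(base, CONFIGDATA_REG);
}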
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 69feeec1628c..aa884d072f0b 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -59,18 +59,4 @@ config NOP_USB_XCEIV
 59 built-in with usb ip or which are autonomous and don't require any 59 built-in with usb ip or which are autonomous and don't require any
60 phy programming such as ISP1x04 etc. 60 phy programming such as ISP1x04 etc.
61 61
62config USB_LANGWELL_OTG
63 tristate "Intel Langwell USB OTG dual-role support"
64 depends on USB && MRST
65 select USB_OTG
66 select USB_OTG_UTILS
67 help
68 Say Y here if you want to build Intel Langwell USB OTG
 69 transceiver driver in the kernel. This driver implements role
70 switch between EHCI host driver and Langwell USB OTG
71 client driver.
72
73 To compile this driver as a module, choose M here: the
74 module will be called langwell_otg.
75
76endif # USB || OTG 62endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 6d1abdd3c0ac..208167856529 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o
9obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o 9obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
10obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 10obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
11obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o 11obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
12obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
13obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o 12obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
14 13
15ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG 14ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
deleted file mode 100644
index 6f628d0e9f39..000000000000
--- a/drivers/usb/otg/langwell_otg.c
+++ /dev/null
@@ -1,1915 +0,0 @@
1/*
2 * Intel Langwell USB OTG transceiver driver
3 * Copyright (C) 2008 - 2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19/* This driver helps to switch Langwell OTG controller function between host
20 * and peripheral. It works with EHCI driver and Langwell client controller
21 * driver together.
22 */
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/pci.h>
26#include <linux/errno.h>
27#include <linux/interrupt.h>
28#include <linux/kernel.h>
29#include <linux/device.h>
30#include <linux/moduleparam.h>
31#include <linux/usb/ch9.h>
32#include <linux/usb/gadget.h>
33#include <linux/usb.h>
34#include <linux/usb/otg.h>
35#include <linux/notifier.h>
36#include <asm/ipc_defs.h>
37#include <linux/delay.h>
38#include "../core/hcd.h"
39
40#include <linux/usb/langwell_otg.h>
41
42#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver"
43#define DRIVER_VERSION "3.0.0.32L.0002"
44
45MODULE_DESCRIPTION(DRIVER_DESC);
46MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
47MODULE_VERSION(DRIVER_VERSION);
48MODULE_LICENSE("GPL");
49
50static const char driver_name[] = "langwell_otg";
51
52static int langwell_otg_probe(struct pci_dev *pdev,
53 const struct pci_device_id *id);
54static void langwell_otg_remove(struct pci_dev *pdev);
55static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
56static int langwell_otg_resume(struct pci_dev *pdev);
57
58static int langwell_otg_set_host(struct otg_transceiver *otg,
59 struct usb_bus *host);
60static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
61 struct usb_gadget *gadget);
62static int langwell_otg_start_srp(struct otg_transceiver *otg);
63
64static const struct pci_device_id pci_ids[] = {{
65 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
66 .class_mask = ~0,
67 .vendor = 0x8086,
68 .device = 0x0811,
69 .subvendor = PCI_ANY_ID,
70 .subdevice = PCI_ANY_ID,
71}, { /* end: all zeroes */ }
72};
73
74static struct pci_driver otg_pci_driver = {
75 .name = (char *) driver_name,
76 .id_table = pci_ids,
77
78 .probe = langwell_otg_probe,
79 .remove = langwell_otg_remove,
80
81 .suspend = langwell_otg_suspend,
82 .resume = langwell_otg_resume,
83};
84
85static const char *state_string(enum usb_otg_state state)
86{
87 switch (state) {
88 case OTG_STATE_A_IDLE:
89 return "a_idle";
90 case OTG_STATE_A_WAIT_VRISE:
91 return "a_wait_vrise";
92 case OTG_STATE_A_WAIT_BCON:
93 return "a_wait_bcon";
94 case OTG_STATE_A_HOST:
95 return "a_host";
96 case OTG_STATE_A_SUSPEND:
97 return "a_suspend";
98 case OTG_STATE_A_PERIPHERAL:
99 return "a_peripheral";
100 case OTG_STATE_A_WAIT_VFALL:
101 return "a_wait_vfall";
102 case OTG_STATE_A_VBUS_ERR:
103 return "a_vbus_err";
104 case OTG_STATE_B_IDLE:
105 return "b_idle";
106 case OTG_STATE_B_SRP_INIT:
107 return "b_srp_init";
108 case OTG_STATE_B_PERIPHERAL:
109 return "b_peripheral";
110 case OTG_STATE_B_WAIT_ACON:
111 return "b_wait_acon";
112 case OTG_STATE_B_HOST:
113 return "b_host";
114 default:
115 return "UNDEFINED";
116 }
117}
118
119/* HSM timers */
120static inline struct langwell_otg_timer *otg_timer_initializer
121(void (*function)(unsigned long), unsigned long expires, unsigned long data)
122{
123 struct langwell_otg_timer *timer;
124 timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
125 timer->function = function;
126 timer->expires = expires;
127 timer->data = data;
128 return timer;
129}
130
131static struct langwell_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr,
132 *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr, *b_srp_res_tmr,
133 *b_bus_suspend_tmr;
134
135static struct list_head active_timers;
136
137static struct langwell_otg *the_transceiver;
138
139/* host/client notify transceiver when event affects HNP state */
140void langwell_update_transceiver()
141{
142 otg_dbg("transceiver driver is notified\n");
143 queue_work(the_transceiver->qwork, &the_transceiver->work);
144}
145EXPORT_SYMBOL(langwell_update_transceiver);
146
147static int langwell_otg_set_host(struct otg_transceiver *otg,
148 struct usb_bus *host)
149{
150 otg->host = host;
151
152 return 0;
153}
154
155static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
156 struct usb_gadget *gadget)
157{
158 otg->gadget = gadget;
159
160 return 0;
161}
162
163static int langwell_otg_set_power(struct otg_transceiver *otg,
164 unsigned mA)
165{
166 return 0;
167}
168
169/* A-device drives vbus, controlled through PMIC CHRGCNTL register*/
170static void langwell_otg_drv_vbus(int on)
171{
172 struct ipc_pmic_reg_data pmic_data = {0};
173 struct ipc_pmic_reg_data battery_data;
174
175 /* Check if battery is attached or not */
176 battery_data.pmic_reg_data[0].register_address = 0xd2;
177 battery_data.ioc = 0;
178 battery_data.num_entries = 1;
179 if (ipc_pmic_register_read(&battery_data)) {
180 otg_dbg("Failed to read PMIC register 0xd2.\n");
181 return;
182 }
183
184 if ((battery_data.pmic_reg_data[0].value & 0x20) == 0) {
185 otg_dbg("no battery attached\n");
186 return;
187 }
188
189 /* Workaround for battery attachment issue */
190 if (battery_data.pmic_reg_data[0].value == 0x34) {
191 otg_dbg("battery \n");
192 return;
193 }
194
195 otg_dbg("battery attached\n");
196
197 pmic_data.ioc = 0;
198 pmic_data.pmic_reg_data[0].register_address = 0xD4;
199 pmic_data.num_entries = 1;
200 if (on)
201 pmic_data.pmic_reg_data[0].value = 0x20;
202 else
203 pmic_data.pmic_reg_data[0].value = 0xc0;
204
205 if (ipc_pmic_register_write(&pmic_data, TRUE))
206 otg_dbg("Failed to write PMIC.\n");
207
208}
209
210/* charge vbus or discharge vbus through a resistor to ground */
211static void langwell_otg_chrg_vbus(int on)
212{
213
214 u32 val;
215
216 val = readl(the_transceiver->regs + CI_OTGSC);
217
218 if (on)
219 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
220 the_transceiver->regs + CI_OTGSC);
221 else
222 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
223 the_transceiver->regs + CI_OTGSC);
224
225}
226
227/* Start SRP */
228static int langwell_otg_start_srp(struct otg_transceiver *otg)
229{
230 u32 val;
231
232 otg_dbg("Start SRP ->\n");
233
234 val = readl(the_transceiver->regs + CI_OTGSC);
235
236 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
237 the_transceiver->regs + CI_OTGSC);
238
 239 /* Check if the data pulse has finished or not */
240 msleep(8);
241 val = readl(the_transceiver->regs + CI_OTGSC);
242 if (val & (OTGSC_HADP | OTGSC_DP))
243 otg_dbg("DataLine SRP Error\n");
244
245 /* FIXME: VBus SRP */
246
247 return 0;
248}
249
250
251/* stop SOF via bus_suspend */
252static void langwell_otg_loc_sof(int on)
253{
254 struct usb_hcd *hcd;
255 int err;
256
257 otg_dbg("loc_sof -> %d\n", on);
258
259 hcd = bus_to_hcd(the_transceiver->otg.host);
260 if (on)
261 err = hcd->driver->bus_resume(hcd);
262 else
263 err = hcd->driver->bus_suspend(hcd);
264
265 if (err)
266 otg_dbg("Failed to resume/suspend bus - %d\n", err);
267}
268
269static void langwell_otg_phy_low_power(int on)
270{
271 u32 val;
272
273 otg_dbg("phy low power mode-> %d\n", on);
274
275 val = readl(the_transceiver->regs + CI_HOSTPC1);
276 if (on)
277 writel(val | HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
278 else
279 writel(val & ~HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
280}
281
282/* Enable/Disable OTG interrupt */
283static void langwell_otg_intr(int on)
284{
285 u32 val;
286
287 otg_dbg("interrupt -> %d\n", on);
288
289 val = readl(the_transceiver->regs + CI_OTGSC);
290 if (on) {
291 val = val | (OTGSC_INTEN_MASK | OTGSC_IDPU);
292 writel(val, the_transceiver->regs + CI_OTGSC);
293 } else {
294 val = val & ~(OTGSC_INTEN_MASK | OTGSC_IDPU);
295 writel(val, the_transceiver->regs + CI_OTGSC);
296 }
297}
298
299/* set HAAR: Hardware Assist Auto-Reset */
300static void langwell_otg_HAAR(int on)
301{
302 u32 val;
303
304 otg_dbg("HAAR -> %d\n", on);
305
306 val = readl(the_transceiver->regs + CI_OTGSC);
307 if (on)
308 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
309 the_transceiver->regs + CI_OTGSC);
310 else
311 writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
312 the_transceiver->regs + CI_OTGSC);
313}
314
315/* set HABA: Hardware Assist B-Disconnect to A-Connect */
316static void langwell_otg_HABA(int on)
317{
318 u32 val;
319
320 otg_dbg("HABA -> %d\n", on);
321
322 val = readl(the_transceiver->regs + CI_OTGSC);
323 if (on)
324 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
325 the_transceiver->regs + CI_OTGSC);
326 else
327 writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
328 the_transceiver->regs + CI_OTGSC);
329}
330
331static int langwell_otg_check_se0_srp(int on)
332{
333 u32 val;
334
335 int delay_time = TB_SE0_SRP * 10; /* step is 100us */
336
337 otg_dbg("check_se0_srp -> \n");
338
339 do {
340 udelay(100);
341 if (!delay_time--)
342 break;
343 val = readl(the_transceiver->regs + CI_PORTSC1);
344 val &= PORTSC_LS;
345 } while (!val);
346
347 otg_dbg("check_se0_srp <- \n");
348 return val;
349}
350
351/* The timeout callback function to set time out bit */
352static void set_tmout(unsigned long indicator)
353{
354 *(int *)indicator = 1;
355}
356
357void langwell_otg_nsf_msg(unsigned long indicator)
358{
359 switch (indicator) {
360 case 2:
361 case 4:
362 case 6:
363 case 7:
 364 printk(KERN_ERR "OTG:NSF-%lu - device not responding\n",
365 indicator);
366 break;
367 case 3:
 368 printk(KERN_ERR "OTG:NSF-%lu - device not supported\n",
369 indicator);
370 break;
371 default:
372 printk(KERN_ERR "Do not have this kind of NSF\n");
373 break;
374 }
375}
376
377/* Initialize timers */
378static void langwell_otg_init_timers(struct otg_hsm *hsm)
379{
380 /* HSM used timers */
381 a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
382 (unsigned long)&hsm->a_wait_vrise_tmout);
383 a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON,
384 (unsigned long)&hsm->a_wait_bcon_tmout);
385 a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
386 (unsigned long)&hsm->a_aidl_bdis_tmout);
387 b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST,
388 (unsigned long)&hsm->b_ase0_brst_tmout);
389 b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
390 (unsigned long)&hsm->b_se0_srp);
391 b_srp_res_tmr = otg_timer_initializer(&set_tmout, TB_SRP_RES,
392 (unsigned long)&hsm->b_srp_res_tmout);
393 b_bus_suspend_tmr = otg_timer_initializer(&set_tmout, TB_BUS_SUSPEND,
394 (unsigned long)&hsm->b_bus_suspend_tmout);
395}
396
397/* Free timers */
398static void langwell_otg_free_timers(void)
399{
400 kfree(a_wait_vrise_tmr);
401 kfree(a_wait_bcon_tmr);
402 kfree(a_aidl_bdis_tmr);
403 kfree(b_ase0_brst_tmr);
404 kfree(b_se0_srp_tmr);
405 kfree(b_srp_res_tmr);
406 kfree(b_bus_suspend_tmr);
407}
408
409/* Add timer to timer list */
410static void langwell_otg_add_timer(void *gtimer)
411{
412 struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
413 struct langwell_otg_timer *tmp_timer;
414 u32 val32;
415
416 /* Check if the timer is already in the active list,
417 * if so update timer count
418 */
419 list_for_each_entry(tmp_timer, &active_timers, list)
420 if (tmp_timer == timer) {
421 timer->count = timer->expires;
422 return;
423 }
424 timer->count = timer->expires;
425
426 if (list_empty(&active_timers)) {
427 val32 = readl(the_transceiver->regs + CI_OTGSC);
428 writel(val32 | OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
429 }
430
431 list_add_tail(&timer->list, &active_timers);
432}
433
434/* Remove timer from the timer list; clear timeout status */
435static void langwell_otg_del_timer(void *gtimer)
436{
437 struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
438 struct langwell_otg_timer *tmp_timer, *del_tmp;
439 u32 val32;
440
441 list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
442 if (tmp_timer == timer)
443 list_del(&timer->list);
444
445 if (list_empty(&active_timers)) {
446 val32 = readl(the_transceiver->regs + CI_OTGSC);
447 writel(val32 & ~OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
448 }
449}
450
451/* Reduce timer count by 1, and find timeout conditions.*/
452static int langwell_otg_tick_timer(u32 *int_sts)
453{
454 struct langwell_otg_timer *tmp_timer, *del_tmp;
455 int expired = 0;
456
457 list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
458 tmp_timer->count--;
459 /* check if timer expires */
460 if (!tmp_timer->count) {
461 list_del(&tmp_timer->list);
462 tmp_timer->function(tmp_timer->data);
463 expired = 1;
464 }
465 }
466
467 if (list_empty(&active_timers)) {
468 otg_dbg("tick timer: disable 1ms int\n");
469 *int_sts = *int_sts & ~OTGSC_1MSE;
470 }
471 return expired;
472}
473
474static void reset_otg(void)
475{
476 u32 val;
477 int delay_time = 1000;
478
 479 otg_dbg("resetting OTG controller ...\n");
480 val = readl(the_transceiver->regs + CI_USBCMD);
481 writel(val | USBCMD_RST, the_transceiver->regs + CI_USBCMD);
482 do {
483 udelay(100);
484 if (!delay_time--)
485 otg_dbg("reset timeout\n");
486 val = readl(the_transceiver->regs + CI_USBCMD);
487 val &= USBCMD_RST;
488 } while (val != 0);
489 otg_dbg("reset done.\n");
490}
491
492static void set_host_mode(void)
493{
494 u32 val;
495
496 reset_otg();
497 val = readl(the_transceiver->regs + CI_USBMODE);
498 val = (val & (~USBMODE_CM)) | USBMODE_HOST;
499 writel(val, the_transceiver->regs + CI_USBMODE);
500}
501
502static void set_client_mode(void)
503{
504 u32 val;
505
506 reset_otg();
507 val = readl(the_transceiver->regs + CI_USBMODE);
508 val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
509 writel(val, the_transceiver->regs + CI_USBMODE);
510}
511
512static void init_hsm(void)
513{
514 struct langwell_otg *langwell = the_transceiver;
515 u32 val32;
516
517 /* read OTGSC after reset */
518 val32 = readl(langwell->regs + CI_OTGSC);
519 otg_dbg("%s: OTGSC init value = 0x%x\n", __func__, val32);
520
521 /* set init state */
522 if (val32 & OTGSC_ID) {
523 langwell->hsm.id = 1;
524 langwell->otg.default_a = 0;
525 set_client_mode();
526 langwell->otg.state = OTG_STATE_B_IDLE;
527 langwell_otg_drv_vbus(0);
528 } else {
529 langwell->hsm.id = 0;
530 langwell->otg.default_a = 1;
531 set_host_mode();
532 langwell->otg.state = OTG_STATE_A_IDLE;
533 }
534
535 /* set session indicator */
536 if (val32 & OTGSC_BSE)
537 langwell->hsm.b_sess_end = 1;
538 if (val32 & OTGSC_BSV)
539 langwell->hsm.b_sess_vld = 1;
540 if (val32 & OTGSC_ASV)
541 langwell->hsm.a_sess_vld = 1;
542 if (val32 & OTGSC_AVV)
543 langwell->hsm.a_vbus_vld = 1;
544
 545 /* by default, power the bus */
546 langwell->hsm.a_bus_req = 1;
547 langwell->hsm.a_bus_drop = 0;
 548 /* by default, don't request the bus as a B-device */
549 langwell->hsm.b_bus_req = 0;
550 /* no system error */
551 langwell->hsm.a_clr_err = 0;
552}
553
554static irqreturn_t otg_dummy_irq(int irq, void *_dev)
555{
556 void __iomem *reg_base = _dev;
557 u32 val;
558 u32 int_mask = 0;
559
560 val = readl(reg_base + CI_USBMODE);
561 if ((val & USBMODE_CM) != USBMODE_DEVICE)
562 return IRQ_NONE;
563
564 val = readl(reg_base + CI_USBSTS);
565 int_mask = val & INTR_DUMMY_MASK;
566
567 if (int_mask == 0)
568 return IRQ_NONE;
569
 570 /* clear hsm.b_conn here since the host driver can't detect it;
 571 * otg_dummy_irq being called means a B-disconnect happened.
572 */
573 if (the_transceiver->hsm.b_conn) {
574 the_transceiver->hsm.b_conn = 0;
575 if (spin_trylock(&the_transceiver->wq_lock)) {
576 queue_work(the_transceiver->qwork,
577 &the_transceiver->work);
578 spin_unlock(&the_transceiver->wq_lock);
579 }
580 }
581 /* Clear interrupts */
582 writel(int_mask, reg_base + CI_USBSTS);
583 return IRQ_HANDLED;
584}
585
586static irqreturn_t otg_irq(int irq, void *_dev)
587{
588 struct langwell_otg *langwell = _dev;
589 u32 int_sts, int_en;
590 u32 int_mask = 0;
591 int flag = 0;
592
593 int_sts = readl(langwell->regs + CI_OTGSC);
594 int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
595 int_mask = int_sts & int_en;
596 if (int_mask == 0)
597 return IRQ_NONE;
598
599 if (int_mask & OTGSC_IDIS) {
600 otg_dbg("%s: id change int\n", __func__);
601 langwell->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
602 flag = 1;
603 }
604 if (int_mask & OTGSC_DPIS) {
605 otg_dbg("%s: data pulse int\n", __func__);
606 langwell->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
607 flag = 1;
608 }
609 if (int_mask & OTGSC_BSEIS) {
610 otg_dbg("%s: b session end int\n", __func__);
611 langwell->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
612 flag = 1;
613 }
614 if (int_mask & OTGSC_BSVIS) {
615 otg_dbg("%s: b session valid int\n", __func__);
616 langwell->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
617 flag = 1;
618 }
619 if (int_mask & OTGSC_ASVIS) {
620 otg_dbg("%s: a session valid int\n", __func__);
621 langwell->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
622 flag = 1;
623 }
624 if (int_mask & OTGSC_AVVIS) {
625 otg_dbg("%s: a vbus valid int\n", __func__);
626 langwell->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
627 flag = 1;
628 }
629
630 if (int_mask & OTGSC_1MSS) {
631 /* need to schedule otg_work if any timer is expired */
632 if (langwell_otg_tick_timer(&int_sts))
633 flag = 1;
634 }
635
636 writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
637 langwell->regs + CI_OTGSC);
638 if (flag)
639 queue_work(langwell->qwork, &langwell->work);
640
641 return IRQ_HANDLED;
642}
643
644static void langwell_otg_work(struct work_struct *work)
645{
646 struct langwell_otg *langwell = container_of(work,
647 struct langwell_otg, work);
648 int retval;
649
650 otg_dbg("%s: old state = %s\n", __func__,
651 state_string(langwell->otg.state));
652
653 switch (langwell->otg.state) {
654 case OTG_STATE_UNDEFINED:
655 case OTG_STATE_B_IDLE:
656 if (!langwell->hsm.id) {
657 langwell_otg_del_timer(b_srp_res_tmr);
658 langwell->otg.default_a = 1;
659 langwell->hsm.a_srp_det = 0;
660
661 langwell_otg_chrg_vbus(0);
662 langwell_otg_drv_vbus(0);
663
664 set_host_mode();
665 langwell->otg.state = OTG_STATE_A_IDLE;
666 queue_work(langwell->qwork, &langwell->work);
667 } else if (langwell->hsm.b_srp_res_tmout) {
668 langwell->hsm.b_srp_res_tmout = 0;
669 langwell->hsm.b_bus_req = 0;
670 langwell_otg_nsf_msg(6);
671 } else if (langwell->hsm.b_sess_vld) {
672 langwell_otg_del_timer(b_srp_res_tmr);
673 langwell->hsm.b_sess_end = 0;
674 langwell->hsm.a_bus_suspend = 0;
675
676 langwell_otg_chrg_vbus(0);
677 if (langwell->client_ops) {
678 langwell->client_ops->resume(langwell->pdev);
679 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
680 } else
681 otg_dbg("client driver not loaded.\n");
682
683 } else if (langwell->hsm.b_bus_req &&
684 (langwell->hsm.b_sess_end)) {
685 /* workaround for b_se0_srp detection */
686 retval = langwell_otg_check_se0_srp(0);
687 if (retval) {
688 langwell->hsm.b_bus_req = 0;
689 otg_dbg("LS is not SE0, try again later\n");
690 } else {
691 /* Start SRP */
692 langwell_otg_start_srp(&langwell->otg);
693 langwell_otg_add_timer(b_srp_res_tmr);
694 }
695 }
696 break;
697 case OTG_STATE_B_SRP_INIT:
698 if (!langwell->hsm.id) {
699 langwell->otg.default_a = 1;
700 langwell->hsm.a_srp_det = 0;
701
702 langwell_otg_drv_vbus(0);
703 langwell_otg_chrg_vbus(0);
704
705 langwell->otg.state = OTG_STATE_A_IDLE;
706 queue_work(langwell->qwork, &langwell->work);
707 } else if (langwell->hsm.b_sess_vld) {
708 langwell_otg_chrg_vbus(0);
709 if (langwell->client_ops) {
710 langwell->client_ops->resume(langwell->pdev);
711 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
712 } else
713 otg_dbg("client driver not loaded.\n");
714 }
715 break;
716 case OTG_STATE_B_PERIPHERAL:
717 if (!langwell->hsm.id) {
718 langwell->otg.default_a = 1;
719 langwell->hsm.a_srp_det = 0;
720
721 langwell_otg_drv_vbus(0);
722 langwell_otg_chrg_vbus(0);
723 set_host_mode();
724
725 if (langwell->client_ops) {
726 langwell->client_ops->suspend(langwell->pdev,
727 PMSG_FREEZE);
728 } else
729 otg_dbg("client driver has been removed.\n");
730
731 langwell->otg.state = OTG_STATE_A_IDLE;
732 queue_work(langwell->qwork, &langwell->work);
733 } else if (!langwell->hsm.b_sess_vld) {
734 langwell->hsm.b_hnp_enable = 0;
735
736 if (langwell->client_ops) {
737 langwell->client_ops->suspend(langwell->pdev,
738 PMSG_FREEZE);
739 } else
740 otg_dbg("client driver has been removed.\n");
741
742 langwell->otg.state = OTG_STATE_B_IDLE;
743 } else if (langwell->hsm.b_bus_req && langwell->hsm.b_hnp_enable
744 && langwell->hsm.a_bus_suspend) {
745
746 if (langwell->client_ops) {
747 langwell->client_ops->suspend(langwell->pdev,
748 PMSG_FREEZE);
749 } else
750 otg_dbg("client driver has been removed.\n");
751
752 langwell_otg_HAAR(1);
753 langwell->hsm.a_conn = 0;
754
755 if (langwell->host_ops) {
756 langwell->host_ops->probe(langwell->pdev,
757 langwell->host_ops->id_table);
758 langwell->otg.state = OTG_STATE_B_WAIT_ACON;
759 } else
760 otg_dbg("host driver not loaded.\n");
761
762 langwell->hsm.a_bus_resume = 0;
763 langwell->hsm.b_ase0_brst_tmout = 0;
764 langwell_otg_add_timer(b_ase0_brst_tmr);
765 }
766 break;
767
768 case OTG_STATE_B_WAIT_ACON:
769 if (!langwell->hsm.id) {
770 langwell_otg_del_timer(b_ase0_brst_tmr);
771 langwell->otg.default_a = 1;
772 langwell->hsm.a_srp_det = 0;
773
774 langwell_otg_drv_vbus(0);
775 langwell_otg_chrg_vbus(0);
776 set_host_mode();
777
778 langwell_otg_HAAR(0);
779 if (langwell->host_ops)
780 langwell->host_ops->remove(langwell->pdev);
781 else
782 otg_dbg("host driver has been removed.\n");
783 langwell->otg.state = OTG_STATE_A_IDLE;
784 queue_work(langwell->qwork, &langwell->work);
785 } else if (!langwell->hsm.b_sess_vld) {
786 langwell_otg_del_timer(b_ase0_brst_tmr);
787 langwell->hsm.b_hnp_enable = 0;
788 langwell->hsm.b_bus_req = 0;
789 langwell_otg_chrg_vbus(0);
790 langwell_otg_HAAR(0);
791
792 if (langwell->host_ops)
793 langwell->host_ops->remove(langwell->pdev);
794 else
795 otg_dbg("host driver has been removed.\n");
796 langwell->otg.state = OTG_STATE_B_IDLE;
797 } else if (langwell->hsm.a_conn) {
798 langwell_otg_del_timer(b_ase0_brst_tmr);
799 langwell_otg_HAAR(0);
800 langwell->otg.state = OTG_STATE_B_HOST;
801 queue_work(langwell->qwork, &langwell->work);
802 } else if (langwell->hsm.a_bus_resume ||
803 langwell->hsm.b_ase0_brst_tmout) {
804 langwell_otg_del_timer(b_ase0_brst_tmr);
805 langwell_otg_HAAR(0);
806 langwell_otg_nsf_msg(7);
807
808 if (langwell->host_ops)
809 langwell->host_ops->remove(langwell->pdev);
810 else
811 otg_dbg("host driver has been removed.\n");
812
813 langwell->hsm.a_bus_suspend = 0;
814 langwell->hsm.b_bus_req = 0;
815
816 if (langwell->client_ops)
817 langwell->client_ops->resume(langwell->pdev);
818 else
819 otg_dbg("client driver not loaded.\n");
820
821 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
822 }
823 break;
824
825 case OTG_STATE_B_HOST:
826 if (!langwell->hsm.id) {
827 langwell->otg.default_a = 1;
828 langwell->hsm.a_srp_det = 0;
829
830 langwell_otg_drv_vbus(0);
831 langwell_otg_chrg_vbus(0);
832 set_host_mode();
833 if (langwell->host_ops)
834 langwell->host_ops->remove(langwell->pdev);
835 else
836 otg_dbg("host driver has been removed.\n");
837 langwell->otg.state = OTG_STATE_A_IDLE;
838 queue_work(langwell->qwork, &langwell->work);
839 } else if (!langwell->hsm.b_sess_vld) {
840 langwell->hsm.b_hnp_enable = 0;
841 langwell->hsm.b_bus_req = 0;
842 langwell_otg_chrg_vbus(0);
843 if (langwell->host_ops)
844 langwell->host_ops->remove(langwell->pdev);
845 else
846 otg_dbg("host driver has been removed.\n");
847 langwell->otg.state = OTG_STATE_B_IDLE;
848 } else if ((!langwell->hsm.b_bus_req) ||
849 (!langwell->hsm.a_conn)) {
850 langwell->hsm.b_bus_req = 0;
851 langwell_otg_loc_sof(0);
852 if (langwell->host_ops)
853 langwell->host_ops->remove(langwell->pdev);
854 else
855 otg_dbg("host driver has been removed.\n");
856
857 langwell->hsm.a_bus_suspend = 0;
858
859 if (langwell->client_ops)
860 langwell->client_ops->resume(langwell->pdev);
861 else
862 otg_dbg("client driver not loaded.\n");
863
864 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
865 }
866 break;
867
868 case OTG_STATE_A_IDLE:
869 langwell->otg.default_a = 1;
870 if (langwell->hsm.id) {
871 langwell->otg.default_a = 0;
872 langwell->hsm.b_bus_req = 0;
873 langwell_otg_drv_vbus(0);
874 langwell_otg_chrg_vbus(0);
875
876 langwell->otg.state = OTG_STATE_B_IDLE;
877 queue_work(langwell->qwork, &langwell->work);
878 } else if (langwell->hsm.a_sess_vld) {
879 langwell_otg_drv_vbus(1);
880 langwell->hsm.a_srp_det = 1;
881 langwell->hsm.a_wait_vrise_tmout = 0;
882 langwell_otg_add_timer(a_wait_vrise_tmr);
883 langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
884 queue_work(langwell->qwork, &langwell->work);
885 } else if (!langwell->hsm.a_bus_drop &&
886 (langwell->hsm.a_srp_det || langwell->hsm.a_bus_req)) {
887 langwell_otg_drv_vbus(1);
888 langwell->hsm.a_wait_vrise_tmout = 0;
889 langwell_otg_add_timer(a_wait_vrise_tmr);
890 langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
891 queue_work(langwell->qwork, &langwell->work);
892 }
893 break;
894 case OTG_STATE_A_WAIT_VRISE:
895 if (langwell->hsm.id) {
896 langwell_otg_del_timer(a_wait_vrise_tmr);
897 langwell->hsm.b_bus_req = 0;
898 langwell->otg.default_a = 0;
899 langwell_otg_drv_vbus(0);
900 langwell->otg.state = OTG_STATE_B_IDLE;
901 } else if (langwell->hsm.a_vbus_vld) {
902 langwell_otg_del_timer(a_wait_vrise_tmr);
903 if (langwell->host_ops)
904 langwell->host_ops->probe(langwell->pdev,
905 langwell->host_ops->id_table);
906 else
907 otg_dbg("host driver not loaded.\n");
908 langwell->hsm.b_conn = 0;
909 langwell->hsm.a_set_b_hnp_en = 0;
910 langwell->hsm.a_wait_bcon_tmout = 0;
911 langwell_otg_add_timer(a_wait_bcon_tmr);
912 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
913 } else if (langwell->hsm.a_wait_vrise_tmout) {
914 if (langwell->hsm.a_vbus_vld) {
915 if (langwell->host_ops)
916 langwell->host_ops->probe(
917 langwell->pdev,
918 langwell->host_ops->id_table);
919 else
920 otg_dbg("host driver not loaded.\n");
921 langwell->hsm.b_conn = 0;
922 langwell->hsm.a_set_b_hnp_en = 0;
923 langwell->hsm.a_wait_bcon_tmout = 0;
924 langwell_otg_add_timer(a_wait_bcon_tmr);
925 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
926 } else {
927 langwell_otg_drv_vbus(0);
928 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
929 }
930 }
931 break;
932 case OTG_STATE_A_WAIT_BCON:
933 if (langwell->hsm.id) {
934 langwell_otg_del_timer(a_wait_bcon_tmr);
935
936 langwell->otg.default_a = 0;
937 langwell->hsm.b_bus_req = 0;
938 if (langwell->host_ops)
939 langwell->host_ops->remove(langwell->pdev);
940 else
941 otg_dbg("host driver has been removed.\n");
942 langwell_otg_drv_vbus(0);
943 langwell->otg.state = OTG_STATE_B_IDLE;
944 queue_work(langwell->qwork, &langwell->work);
945 } else if (!langwell->hsm.a_vbus_vld) {
946 langwell_otg_del_timer(a_wait_bcon_tmr);
947
948 if (langwell->host_ops)
949 langwell->host_ops->remove(langwell->pdev);
950 else
951 otg_dbg("host driver has been removed.\n");
952 langwell_otg_drv_vbus(0);
953 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
954 } else if (langwell->hsm.a_bus_drop ||
955 (langwell->hsm.a_wait_bcon_tmout &&
956 !langwell->hsm.a_bus_req)) {
957 langwell_otg_del_timer(a_wait_bcon_tmr);
958
959 if (langwell->host_ops)
960 langwell->host_ops->remove(langwell->pdev);
961 else
962 otg_dbg("host driver has been removed.\n");
963 langwell_otg_drv_vbus(0);
964 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
965 } else if (langwell->hsm.b_conn) {
966 langwell_otg_del_timer(a_wait_bcon_tmr);
967
968 langwell->hsm.a_suspend_req = 0;
969 langwell->otg.state = OTG_STATE_A_HOST;
970 if (!langwell->hsm.a_bus_req &&
971 langwell->hsm.a_set_b_hnp_en) {
972 /* It is not safe enough to do a fast
973 * transition from A_WAIT_BCON to
974 * A_SUSPEND */
975 msleep(10000);
976 if (langwell->hsm.a_bus_req)
977 break;
978
979 if (request_irq(langwell->pdev->irq,
980 otg_dummy_irq, IRQF_SHARED,
981 driver_name, langwell->regs) != 0) {
982 otg_dbg("request interrupt %d fail\n",
983 langwell->pdev->irq);
984 }
985
986 langwell_otg_HABA(1);
987 langwell->hsm.b_bus_resume = 0;
988 langwell->hsm.a_aidl_bdis_tmout = 0;
989 langwell_otg_add_timer(a_aidl_bdis_tmr);
990
991 langwell_otg_loc_sof(0);
992 langwell->otg.state = OTG_STATE_A_SUSPEND;
993 } else if (!langwell->hsm.a_bus_req &&
994 !langwell->hsm.a_set_b_hnp_en) {
995 struct pci_dev *pdev = langwell->pdev;
996 if (langwell->host_ops)
997 langwell->host_ops->remove(pdev);
998 else
999 otg_dbg("host driver removed.\n");
1000 langwell_otg_drv_vbus(0);
1001 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1002 }
1003 }
1004 break;
1005 case OTG_STATE_A_HOST:
1006 if (langwell->hsm.id) {
1007 langwell->otg.default_a = 0;
1008 langwell->hsm.b_bus_req = 0;
1009 if (langwell->host_ops)
1010 langwell->host_ops->remove(langwell->pdev);
1011 else
1012 otg_dbg("host driver has been removed.\n");
1013 langwell_otg_drv_vbus(0);
1014 langwell->otg.state = OTG_STATE_B_IDLE;
1015 queue_work(langwell->qwork, &langwell->work);
1016 } else if (langwell->hsm.a_bus_drop ||
1017 (!langwell->hsm.a_set_b_hnp_en && !langwell->hsm.a_bus_req)) {
1018 if (langwell->host_ops)
1019 langwell->host_ops->remove(langwell->pdev);
1020 else
1021 otg_dbg("host driver has been removed.\n");
1022 langwell_otg_drv_vbus(0);
1023 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1024 } else if (!langwell->hsm.a_vbus_vld) {
1025 if (langwell->host_ops)
1026 langwell->host_ops->remove(langwell->pdev);
1027 else
1028 otg_dbg("host driver has been removed.\n");
1029 langwell_otg_drv_vbus(0);
1030 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
1031 } else if (langwell->hsm.a_set_b_hnp_en
1032 && !langwell->hsm.a_bus_req) {
1033 /* Set HABA to enable hardware assistance to signal
1034 * A-connect after receiving B-disconnect. The hardware
1035 * will then set client mode and enable URE, SLE and
1036 * PCE after the assistance. otg_dummy_irq is used to
1037 * clear these interrupts while the client driver is not
1038 * yet resumed. */
1039 if (request_irq(langwell->pdev->irq,
1040 otg_dummy_irq, IRQF_SHARED, driver_name,
1041 langwell->regs) != 0) {
1042 otg_dbg("request interrupt %d failed\n",
1043 langwell->pdev->irq);
1044 }
1045
1046 /* set HABA */
1047 langwell_otg_HABA(1);
1048 langwell->hsm.b_bus_resume = 0;
1049 langwell->hsm.a_aidl_bdis_tmout = 0;
1050 langwell_otg_add_timer(a_aidl_bdis_tmr);
1051 langwell_otg_loc_sof(0);
1052 langwell->otg.state = OTG_STATE_A_SUSPEND;
1053 } else if (!langwell->hsm.b_conn || !langwell->hsm.a_bus_req) {
1054 langwell->hsm.a_wait_bcon_tmout = 0;
1055 langwell->hsm.a_set_b_hnp_en = 0;
1056 langwell_otg_add_timer(a_wait_bcon_tmr);
1057 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
1058 }
1059 break;
1060 case OTG_STATE_A_SUSPEND:
1061 if (langwell->hsm.id) {
1062 langwell_otg_del_timer(a_aidl_bdis_tmr);
1063 langwell_otg_HABA(0);
1064 free_irq(langwell->pdev->irq, langwell->regs);
1065 langwell->otg.default_a = 0;
1066 langwell->hsm.b_bus_req = 0;
1067 if (langwell->host_ops)
1068 langwell->host_ops->remove(langwell->pdev);
1069 else
1070 otg_dbg("host driver has been removed.\n");
1071 langwell_otg_drv_vbus(0);
1072 langwell->otg.state = OTG_STATE_B_IDLE;
1073 queue_work(langwell->qwork, &langwell->work);
1074 } else if (langwell->hsm.a_bus_req ||
1075 langwell->hsm.b_bus_resume) {
1076 langwell_otg_del_timer(a_aidl_bdis_tmr);
1077 langwell_otg_HABA(0);
1078 free_irq(langwell->pdev->irq, langwell->regs);
1079 langwell->hsm.a_suspend_req = 0;
1080 langwell_otg_loc_sof(1);
1081 langwell->otg.state = OTG_STATE_A_HOST;
1082 } else if (langwell->hsm.a_aidl_bdis_tmout ||
1083 langwell->hsm.a_bus_drop) {
1084 langwell_otg_del_timer(a_aidl_bdis_tmr);
1085 langwell_otg_HABA(0);
1086 free_irq(langwell->pdev->irq, langwell->regs);
1087 if (langwell->host_ops)
1088 langwell->host_ops->remove(langwell->pdev);
1089 else
1090 otg_dbg("host driver has been removed.\n");
1091 langwell_otg_drv_vbus(0);
1092 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1093 } else if (!langwell->hsm.b_conn &&
1094 langwell->hsm.a_set_b_hnp_en) {
1095 langwell_otg_del_timer(a_aidl_bdis_tmr);
1096 langwell_otg_HABA(0);
1097 free_irq(langwell->pdev->irq, langwell->regs);
1098
1099 if (langwell->host_ops)
1100 langwell->host_ops->remove(langwell->pdev);
1101 else
1102 otg_dbg("host driver has been removed.\n");
1103
1104 langwell->hsm.b_bus_suspend = 0;
1105 langwell->hsm.b_bus_suspend_vld = 0;
1106 langwell->hsm.b_bus_suspend_tmout = 0;
1107
1108 /* msleep(200); */
1109 if (langwell->client_ops)
1110 langwell->client_ops->resume(langwell->pdev);
1111 else
1112 otg_dbg("client driver not loaded.\n");
1113
1114 langwell_otg_add_timer(b_bus_suspend_tmr);
1115 langwell->otg.state = OTG_STATE_A_PERIPHERAL;
1116 break;
1117 } else if (!langwell->hsm.a_vbus_vld) {
1118 langwell_otg_del_timer(a_aidl_bdis_tmr);
1119 langwell_otg_HABA(0);
1120 free_irq(langwell->pdev->irq, langwell->regs);
1121 if (langwell->host_ops)
1122 langwell->host_ops->remove(langwell->pdev);
1123 else
1124 otg_dbg("host driver has been removed.\n");
1125 langwell_otg_drv_vbus(0);
1126 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
1127 }
1128 break;
1129 case OTG_STATE_A_PERIPHERAL:
1130 if (langwell->hsm.id) {
1131 langwell_otg_del_timer(b_bus_suspend_tmr);
1132 langwell->otg.default_a = 0;
1133 langwell->hsm.b_bus_req = 0;
1134 if (langwell->client_ops)
1135 langwell->client_ops->suspend(langwell->pdev,
1136 PMSG_FREEZE);
1137 else
1138 otg_dbg("client driver has been removed.\n");
1139 langwell_otg_drv_vbus(0);
1140 langwell->otg.state = OTG_STATE_B_IDLE;
1141 queue_work(langwell->qwork, &langwell->work);
1142 } else if (!langwell->hsm.a_vbus_vld) {
1143 langwell_otg_del_timer(b_bus_suspend_tmr);
1144 if (langwell->client_ops)
1145 langwell->client_ops->suspend(langwell->pdev,
1146 PMSG_FREEZE);
1147 else
1148 otg_dbg("client driver has been removed.\n");
1149 langwell_otg_drv_vbus(0);
1150 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
1151 } else if (langwell->hsm.a_bus_drop) {
1152 langwell_otg_del_timer(b_bus_suspend_tmr);
1153 if (langwell->client_ops)
1154 langwell->client_ops->suspend(langwell->pdev,
1155 PMSG_FREEZE);
1156 else
1157 otg_dbg("client driver has been removed.\n");
1158 langwell_otg_drv_vbus(0);
1159 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1160 } else if (langwell->hsm.b_bus_suspend) {
1161 langwell_otg_del_timer(b_bus_suspend_tmr);
1162 if (langwell->client_ops)
1163 langwell->client_ops->suspend(langwell->pdev,
1164 PMSG_FREEZE);
1165 else
1166 otg_dbg("client driver has been removed.\n");
1167
1168 if (langwell->host_ops)
1169 langwell->host_ops->probe(langwell->pdev,
1170 langwell->host_ops->id_table);
1171 else
1172 otg_dbg("host driver not loaded.\n");
1173 langwell->hsm.a_set_b_hnp_en = 0;
1174 langwell->hsm.a_wait_bcon_tmout = 0;
1175 langwell_otg_add_timer(a_wait_bcon_tmr);
1176 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
1177 } else if (langwell->hsm.b_bus_suspend_tmout) {
1178 u32 val;
1179 val = readl(langwell->regs + CI_PORTSC1);
1180 if (!(val & PORTSC_SUSP))
1181 break;
1182 if (langwell->client_ops)
1183 langwell->client_ops->suspend(langwell->pdev,
1184 PMSG_FREEZE);
1185 else
1186 otg_dbg("client driver has been removed.\n");
1187 if (langwell->host_ops)
1188 langwell->host_ops->probe(langwell->pdev,
1189 langwell->host_ops->id_table);
1190 else
1191 otg_dbg("host driver not loaded.\n");
1192 langwell->hsm.a_set_b_hnp_en = 0;
1193 langwell->hsm.a_wait_bcon_tmout = 0;
1194 langwell_otg_add_timer(a_wait_bcon_tmr);
1195 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
1196 }
1197 break;
1198 case OTG_STATE_A_VBUS_ERR:
1199 if (langwell->hsm.id) {
1200 langwell->otg.default_a = 0;
1201 langwell->hsm.a_clr_err = 0;
1202 langwell->hsm.a_srp_det = 0;
1203 langwell->otg.state = OTG_STATE_B_IDLE;
1204 queue_work(langwell->qwork, &langwell->work);
1205 } else if (langwell->hsm.a_clr_err) {
1206 langwell->hsm.a_clr_err = 0;
1207 langwell->hsm.a_srp_det = 0;
1208 reset_otg();
1209 init_hsm();
1210 if (langwell->otg.state == OTG_STATE_A_IDLE)
1211 queue_work(langwell->qwork, &langwell->work);
1212 }
1213 break;
1214 case OTG_STATE_A_WAIT_VFALL:
1215 if (langwell->hsm.id) {
1216 langwell->otg.default_a = 0;
1217 langwell->otg.state = OTG_STATE_B_IDLE;
1218 queue_work(langwell->qwork, &langwell->work);
1219 } else if (langwell->hsm.a_bus_req) {
1220 langwell_otg_drv_vbus(1);
1221 langwell->hsm.a_wait_vrise_tmout = 0;
1222 langwell_otg_add_timer(a_wait_vrise_tmr);
1223 langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
1224 } else if (!langwell->hsm.a_sess_vld) {
1225 langwell->hsm.a_srp_det = 0;
1226 langwell_otg_drv_vbus(0);
1227 set_host_mode();
1228 langwell->otg.state = OTG_STATE_A_IDLE;
1229 }
1230 break;
1231 default:
1232 ;
1233 }
1234
1235 otg_dbg("%s: new state = %s\n", __func__,
1236 state_string(langwell->otg.state));
1237}
1238
1239static ssize_t
1240show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1241{
1242 struct langwell_otg *langwell;
1243 char *next;
1244 unsigned size;
1245 unsigned t;
1246
1247 langwell = the_transceiver;
1248 next = buf;
1249 size = PAGE_SIZE;
1250
1251 t = scnprintf(next, size,
1252 "\n"
1253 "USBCMD = 0x%08x \n"
1254 "USBSTS = 0x%08x \n"
1255 "USBINTR = 0x%08x \n"
1256 "ASYNCLISTADDR = 0x%08x \n"
1257 "PORTSC1 = 0x%08x \n"
1258 "HOSTPC1 = 0x%08x \n"
1259 "OTGSC = 0x%08x \n"
1260 "USBMODE = 0x%08x \n",
1261 readl(langwell->regs + 0x30),
1262 readl(langwell->regs + 0x34),
1263 readl(langwell->regs + 0x38),
1264 readl(langwell->regs + 0x48),
1265 readl(langwell->regs + 0x74),
1266 readl(langwell->regs + 0xb4),
1267 readl(langwell->regs + 0xf4),
1268 readl(langwell->regs + 0xf8)
1269 );
1270 size -= t;
1271 next += t;
1272
1273 return PAGE_SIZE - size;
1274}
1275static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
1276
1277static ssize_t
1278show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
1279{
1280 struct langwell_otg *langwell;
1281 char *next;
1282 unsigned size;
1283 unsigned t;
1284
1285 langwell = the_transceiver;
1286 next = buf;
1287 size = PAGE_SIZE;
1288
1289 t = scnprintf(next, size,
1290 "\n"
1291 "current state = %s\n"
1292 "a_bus_resume = \t%d\n"
1293 "a_bus_suspend = \t%d\n"
1294 "a_conn = \t%d\n"
1295 "a_sess_vld = \t%d\n"
1296 "a_srp_det = \t%d\n"
1297 "a_vbus_vld = \t%d\n"
1298 "b_bus_resume = \t%d\n"
1299 "b_bus_suspend = \t%d\n"
1300 "b_conn = \t%d\n"
1301 "b_se0_srp = \t%d\n"
1302 "b_sess_end = \t%d\n"
1303 "b_sess_vld = \t%d\n"
1304 "id = \t%d\n"
1305 "a_set_b_hnp_en = \t%d\n"
1306 "b_srp_done = \t%d\n"
1307 "b_hnp_enable = \t%d\n"
1308 "a_wait_vrise_tmout = \t%d\n"
1309 "a_wait_bcon_tmout = \t%d\n"
1310 "a_aidl_bdis_tmout = \t%d\n"
1311 "b_ase0_brst_tmout = \t%d\n"
1312 "a_bus_drop = \t%d\n"
1313 "a_bus_req = \t%d\n"
1314 "a_clr_err = \t%d\n"
1315 "a_suspend_req = \t%d\n"
1316 "b_bus_req = \t%d\n"
1317 "b_bus_suspend_tmout = \t%d\n"
1318 "b_bus_suspend_vld = \t%d\n",
1319 state_string(langwell->otg.state),
1320 langwell->hsm.a_bus_resume,
1321 langwell->hsm.a_bus_suspend,
1322 langwell->hsm.a_conn,
1323 langwell->hsm.a_sess_vld,
1324 langwell->hsm.a_srp_det,
1325 langwell->hsm.a_vbus_vld,
1326 langwell->hsm.b_bus_resume,
1327 langwell->hsm.b_bus_suspend,
1328 langwell->hsm.b_conn,
1329 langwell->hsm.b_se0_srp,
1330 langwell->hsm.b_sess_end,
1331 langwell->hsm.b_sess_vld,
1332 langwell->hsm.id,
1333 langwell->hsm.a_set_b_hnp_en,
1334 langwell->hsm.b_srp_done,
1335 langwell->hsm.b_hnp_enable,
1336 langwell->hsm.a_wait_vrise_tmout,
1337 langwell->hsm.a_wait_bcon_tmout,
1338 langwell->hsm.a_aidl_bdis_tmout,
1339 langwell->hsm.b_ase0_brst_tmout,
1340 langwell->hsm.a_bus_drop,
1341 langwell->hsm.a_bus_req,
1342 langwell->hsm.a_clr_err,
1343 langwell->hsm.a_suspend_req,
1344 langwell->hsm.b_bus_req,
1345 langwell->hsm.b_bus_suspend_tmout,
1346 langwell->hsm.b_bus_suspend_vld
1347 );
1348 size -= t;
1349 next += t;
1350
1351 return PAGE_SIZE - size;
1352}
1353static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
1354
1355static ssize_t
1356get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
1357{
1358 struct langwell_otg *langwell;
1359 char *next;
1360 unsigned size;
1361 unsigned t;
1362
1363 langwell = the_transceiver;
1364 next = buf;
1365 size = PAGE_SIZE;
1366
1367 t = scnprintf(next, size, "%d", langwell->hsm.a_bus_req);
1368 size -= t;
1369 next += t;
1370
1371 return PAGE_SIZE - size;
1372}
1373
1374static ssize_t
1375set_a_bus_req(struct device *dev, struct device_attribute *attr,
1376 const char *buf, size_t count)
1377{
1378 struct langwell_otg *langwell;
1379 langwell = the_transceiver;
1380 if (!langwell->otg.default_a)
1381 return -1;
1382 if (count > 2)
1383 return -1;
1384
1385 if (buf[0] == '0') {
1386 langwell->hsm.a_bus_req = 0;
1387 otg_dbg("a_bus_req = 0\n");
1388 } else if (buf[0] == '1') {
1389 /* If a_bus_drop is TRUE, a_bus_req can't be set */
1390 if (langwell->hsm.a_bus_drop)
1391 return -1;
1392 langwell->hsm.a_bus_req = 1;
1393 otg_dbg("a_bus_req = 1\n");
1394 }
1395 if (spin_trylock(&langwell->wq_lock)) {
1396 queue_work(langwell->qwork, &langwell->work);
1397 spin_unlock(&langwell->wq_lock);
1398 }
1399 return count;
1400}
1401static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
1402
1403static ssize_t
1404get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
1405{
1406 struct langwell_otg *langwell;
1407 char *next;
1408 unsigned size;
1409 unsigned t;
1410
1411 langwell = the_transceiver;
1412 next = buf;
1413 size = PAGE_SIZE;
1414
1415 t = scnprintf(next, size, "%d", langwell->hsm.a_bus_drop);
1416 size -= t;
1417 next += t;
1418
1419 return PAGE_SIZE - size;
1420}
1421
1422static ssize_t
1423set_a_bus_drop(struct device *dev, struct device_attribute *attr,
1424 const char *buf, size_t count)
1425{
1426 struct langwell_otg *langwell;
1427 langwell = the_transceiver;
1428 if (!langwell->otg.default_a)
1429 return -1;
1430 if (count > 2)
1431 return -1;
1432
1433 if (buf[0] == '0') {
1434 langwell->hsm.a_bus_drop = 0;
1435 otg_dbg("a_bus_drop = 0\n");
1436 } else if (buf[0] == '1') {
1437 langwell->hsm.a_bus_drop = 1;
1438 langwell->hsm.a_bus_req = 0;
1439 otg_dbg("a_bus_drop = 1, then a_bus_req = 0\n");
1440 }
1441 if (spin_trylock(&langwell->wq_lock)) {
1442 queue_work(langwell->qwork, &langwell->work);
1443 spin_unlock(&langwell->wq_lock);
1444 }
1445 return count;
1446}
1447static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
1448 get_a_bus_drop, set_a_bus_drop);
1449
1450static ssize_t
1451get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
1452{
1453 struct langwell_otg *langwell;
1454 char *next;
1455 unsigned size;
1456 unsigned t;
1457
1458 langwell = the_transceiver;
1459 next = buf;
1460 size = PAGE_SIZE;
1461
1462 t = scnprintf(next, size, "%d", langwell->hsm.b_bus_req);
1463 size -= t;
1464 next += t;
1465
1466 return PAGE_SIZE - size;
1467}
1468
1469static ssize_t
1470set_b_bus_req(struct device *dev, struct device_attribute *attr,
1471 const char *buf, size_t count)
1472{
1473 struct langwell_otg *langwell;
1474 langwell = the_transceiver;
1475
1476 if (langwell->otg.default_a)
1477 return -1;
1478
1479 if (count > 2)
1480 return -1;
1481
1482 if (buf[0] == '0') {
1483 langwell->hsm.b_bus_req = 0;
1484 otg_dbg("b_bus_req = 0\n");
1485 } else if (buf[0] == '1') {
1486 langwell->hsm.b_bus_req = 1;
1487 otg_dbg("b_bus_req = 1\n");
1488 }
1489 if (spin_trylock(&langwell->wq_lock)) {
1490 queue_work(langwell->qwork, &langwell->work);
1491 spin_unlock(&langwell->wq_lock);
1492 }
1493 return count;
1494}
1495static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
1496
1497static ssize_t
1498set_a_clr_err(struct device *dev, struct device_attribute *attr,
1499 const char *buf, size_t count)
1500{
1501 struct langwell_otg *langwell;
1502 langwell = the_transceiver;
1503
1504 if (!langwell->otg.default_a)
1505 return -1;
1506 if (count > 2)
1507 return -1;
1508
1509 if (buf[0] == '1') {
1510 langwell->hsm.a_clr_err = 1;
1511 otg_dbg("a_clr_err = 1\n");
1512 }
1513 if (spin_trylock(&langwell->wq_lock)) {
1514 queue_work(langwell->qwork, &langwell->work);
1515 spin_unlock(&langwell->wq_lock);
1516 }
1517 return count;
1518}
1519static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
1520
1521static struct attribute *inputs_attrs[] = {
1522 &dev_attr_a_bus_req.attr,
1523 &dev_attr_a_bus_drop.attr,
1524 &dev_attr_b_bus_req.attr,
1525 &dev_attr_a_clr_err.attr,
1526 NULL,
1527};
1528
1529static struct attribute_group debug_dev_attr_group = {
1530 .name = "inputs",
1531 .attrs = inputs_attrs,
1532};
1533
1534int langwell_register_host(struct pci_driver *host_driver)
1535{
1536 int ret = 0;
1537
1538 the_transceiver->host_ops = host_driver;
1539 queue_work(the_transceiver->qwork, &the_transceiver->work);
1540 otg_dbg("host controller driver is registered\n");
1541
1542 return ret;
1543}
1544EXPORT_SYMBOL(langwell_register_host);
1545
1546void langwell_unregister_host(struct pci_driver *host_driver)
1547{
1548 if (the_transceiver->host_ops)
1549 the_transceiver->host_ops->remove(the_transceiver->pdev);
1550 the_transceiver->host_ops = NULL;
1551 the_transceiver->hsm.a_bus_drop = 1;
1552 queue_work(the_transceiver->qwork, &the_transceiver->work);
1553 otg_dbg("host controller driver is unregistered\n");
1554}
1555EXPORT_SYMBOL(langwell_unregister_host);
1556
1557int langwell_register_peripheral(struct pci_driver *client_driver)
1558{
1559 int ret = 0;
1560
1561 if (client_driver)
1562 ret = client_driver->probe(the_transceiver->pdev,
1563 client_driver->id_table);
1564 if (!ret) {
1565 the_transceiver->client_ops = client_driver;
1566 queue_work(the_transceiver->qwork, &the_transceiver->work);
1567 otg_dbg("client controller driver is registered\n");
1568 }
1569
1570 return ret;
1571}
1572EXPORT_SYMBOL(langwell_register_peripheral);
1573
1574void langwell_unregister_peripheral(struct pci_driver *client_driver)
1575{
1576 if (the_transceiver->client_ops)
1577 the_transceiver->client_ops->remove(the_transceiver->pdev);
1578 the_transceiver->client_ops = NULL;
1579 the_transceiver->hsm.b_bus_req = 0;
1580 queue_work(the_transceiver->qwork, &the_transceiver->work);
1581 otg_dbg("client controller driver is unregistered\n");
1582}
1583EXPORT_SYMBOL(langwell_unregister_peripheral);
1584
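As a usage sketch (not part of this driver), a companion host controller driver would hand its pci_driver to langwell_register_host() instead of registering it with the PCI core itself; the OTG state machine above then calls ->probe()/->remove() on the shared pdev as the state transitions require. The driver name, id table and callbacks below are hypothetical, only the two langwell_*_host() exports come from this file:

	/* hypothetical host-side glue, for illustration only */
	static struct pci_driver my_ehci_pci_driver = {
		.name     = "my-ehci-hcd",		/* hypothetical name */
		.id_table = my_ehci_pci_ids,		/* assumed to exist */
		.probe    = my_ehci_pci_probe,
		.remove   = my_ehci_pci_remove,
		.suspend  = my_ehci_pci_suspend,
		.resume   = my_ehci_pci_resume,
	};

	static int __init my_ehci_init(void)
	{
		/* defer ->probe() to the OTG state machine */
		return langwell_register_host(&my_ehci_pci_driver);
	}

	static void __exit my_ehci_exit(void)
	{
		langwell_unregister_host(&my_ehci_pci_driver);
	}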
1585static int langwell_otg_probe(struct pci_dev *pdev,
1586 const struct pci_device_id *id)
1587{
1588 unsigned long resource, len;
1589 void __iomem *base = NULL;
1590 int retval;
1591 u32 val32;
1592 struct langwell_otg *langwell;
1593 char qname[] = "langwell_otg_queue";
1594
1595 retval = 0;
1596 otg_dbg("\notg controller is detected.\n");
1597 if (pci_enable_device(pdev) < 0) {
1598 retval = -ENODEV;
1599 goto done;
1600 }
1601
1602 langwell = kzalloc(sizeof *langwell, GFP_KERNEL);
1603 if (langwell == NULL) {
1604 retval = -ENOMEM;
1605 goto done;
1606 }
1607 the_transceiver = langwell;
1608
1609 /* control register: BAR 0 */
1610 resource = pci_resource_start(pdev, 0);
1611 len = pci_resource_len(pdev, 0);
1612 if (!request_mem_region(resource, len, driver_name)) {
1613 retval = -EBUSY;
1614 goto err;
1615 }
1616 langwell->region = 1;
1617
1618 base = ioremap_nocache(resource, len);
1619 if (base == NULL) {
1620 retval = -EFAULT;
1621 goto err;
1622 }
1623 langwell->regs = base;
1624
1625 if (!pdev->irq) {
1626 otg_dbg("No IRQ.\n");
1627 retval = -ENODEV;
1628 goto err;
1629 }
1630
1631 langwell->qwork = create_workqueue(qname);
1632 if (!langwell->qwork) {
1633 otg_dbg("cannot create workqueue %s\n", qname);
1634 retval = -ENOMEM;
1635 goto err;
1636 }
1637 INIT_WORK(&langwell->work, langwell_otg_work);
1638
1639 /* OTG common part */
1640 langwell->pdev = pdev;
1641 langwell->otg.dev = &pdev->dev;
1642 langwell->otg.label = driver_name;
1643 langwell->otg.set_host = langwell_otg_set_host;
1644 langwell->otg.set_peripheral = langwell_otg_set_peripheral;
1645 langwell->otg.set_power = langwell_otg_set_power;
1646 langwell->otg.start_srp = langwell_otg_start_srp;
1647 langwell->otg.state = OTG_STATE_UNDEFINED;
1648 if (otg_set_transceiver(&langwell->otg)) {
1649 otg_dbg("can't set transceiver\n");
1650 retval = -EBUSY;
1651 goto err;
1652 }
1653
1654 reset_otg();
1655 init_hsm();
1656
1657 spin_lock_init(&langwell->lock);
1658 spin_lock_init(&langwell->wq_lock);
1659 INIT_LIST_HEAD(&active_timers);
1660 langwell_otg_init_timers(&langwell->hsm);
1661
1662 if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
1663 driver_name, langwell) != 0) {
1664 otg_dbg("request interrupt %d failed\n", pdev->irq);
1665 retval = -EBUSY;
1666 goto err;
1667 }
1668
1669 /* enable OTGSC int */
1670 val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
1671 OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
1672 writel(val32, langwell->regs + CI_OTGSC);
1673
1674 retval = device_create_file(&pdev->dev, &dev_attr_registers);
1675 if (retval < 0) {
1676 otg_dbg("Can't register sysfs attribute: %d\n", retval);
1677 goto err;
1678 }
1679
1680 retval = device_create_file(&pdev->dev, &dev_attr_hsm);
1681 if (retval < 0) {
1682 otg_dbg("Can't hsm sysfs attribute: %d\n", retval);
1683 goto err;
1684 }
1685
1686 retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
1687 if (retval < 0) {
1688 otg_dbg("Can't register sysfs attr group: %d\n", retval);
1689 goto err;
1690 }
1691
1692 if (langwell->otg.state == OTG_STATE_A_IDLE)
1693 queue_work(langwell->qwork, &langwell->work);
1694
1695 return 0;
1696
1697err:
1698 if (the_transceiver)
1699 langwell_otg_remove(pdev);
1700done:
1701 return retval;
1702}
1703
1704static void langwell_otg_remove(struct pci_dev *pdev)
1705{
1706 struct langwell_otg *langwell;
1707
1708 langwell = the_transceiver;
1709
1710 if (langwell->qwork) {
1711 flush_workqueue(langwell->qwork);
1712 destroy_workqueue(langwell->qwork);
1713 }
1714 langwell_otg_free_timers();
1715
1716 /* disable OTGSC interrupt as OTGSC doesn't change in reset */
1717 writel(0, langwell->regs + CI_OTGSC);
1718
1719 if (pdev->irq)
1720 free_irq(pdev->irq, langwell);
1721 if (langwell->regs)
1722 iounmap(langwell->regs);
1723 if (langwell->region)
1724 release_mem_region(pci_resource_start(pdev, 0),
1725 pci_resource_len(pdev, 0));
1726
1727 otg_set_transceiver(NULL);
1728 pci_disable_device(pdev);
1729 sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
1730 device_remove_file(&pdev->dev, &dev_attr_hsm);
1731 device_remove_file(&pdev->dev, &dev_attr_registers);
1732 kfree(langwell);
1733 langwell = NULL;
1734}
1735
1736static void transceiver_suspend(struct pci_dev *pdev)
1737{
1738 pci_save_state(pdev);
1739 pci_set_power_state(pdev, PCI_D3hot);
1740 langwell_otg_phy_low_power(1);
1741}
1742
1743static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
1744{
1745 int ret = 0;
1746 struct langwell_otg *langwell;
1747
1748 langwell = the_transceiver;
1749
1750 /* Disable OTG interrupts */
1751 langwell_otg_intr(0);
1752
1753 if (pdev->irq)
1754 free_irq(pdev->irq, langwell);
1755
1756 /* Prevent more otg_work */
1757 flush_workqueue(langwell->qwork);
1758 spin_lock(&langwell->wq_lock);
1759
1760 /* start actions */
1761 switch (langwell->otg.state) {
1762 case OTG_STATE_A_IDLE:
1763 case OTG_STATE_B_IDLE:
1764 case OTG_STATE_A_WAIT_VFALL:
1765 case OTG_STATE_A_VBUS_ERR:
1766 transceiver_suspend(pdev);
1767 break;
1768 case OTG_STATE_A_WAIT_VRISE:
1769 langwell_otg_del_timer(a_wait_vrise_tmr);
1770 langwell->hsm.a_srp_det = 0;
1771 langwell_otg_drv_vbus(0);
1772 langwell->otg.state = OTG_STATE_A_IDLE;
1773 transceiver_suspend(pdev);
1774 break;
1775 case OTG_STATE_A_WAIT_BCON:
1776 langwell_otg_del_timer(a_wait_bcon_tmr);
1777 if (langwell->host_ops)
1778 ret = langwell->host_ops->suspend(pdev, message);
1779 langwell_otg_drv_vbus(0);
1780 break;
1781 case OTG_STATE_A_HOST:
1782 if (langwell->host_ops)
1783 ret = langwell->host_ops->suspend(pdev, message);
1784 langwell_otg_drv_vbus(0);
1785 langwell_otg_phy_low_power(1);
1786 break;
1787 case OTG_STATE_A_SUSPEND:
1788 langwell_otg_del_timer(a_aidl_bdis_tmr);
1789 langwell_otg_HABA(0);
1790 if (langwell->host_ops)
1791 langwell->host_ops->remove(pdev);
1792 else
1793 otg_dbg("host driver has been removed.\n");
1794 langwell_otg_drv_vbus(0);
1795 transceiver_suspend(pdev);
1796 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1797 break;
1798 case OTG_STATE_A_PERIPHERAL:
1799 if (langwell->client_ops)
1800 ret = langwell->client_ops->suspend(pdev, message);
1801 else
1802 otg_dbg("client driver has been removed.\n");
1803 langwell_otg_drv_vbus(0);
1804 transceiver_suspend(pdev);
1805 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1806 break;
1807 case OTG_STATE_B_HOST:
1808 if (langwell->host_ops)
1809 langwell->host_ops->remove(pdev);
1810 else
1811 otg_dbg("host driver has been removed.\n");
1812 langwell->hsm.b_bus_req = 0;
1813 transceiver_suspend(pdev);
1814 langwell->otg.state = OTG_STATE_B_IDLE;
1815 break;
1816 case OTG_STATE_B_PERIPHERAL:
1817 if (langwell->client_ops)
1818 ret = langwell->client_ops->suspend(pdev, message);
1819 else
1820 otg_dbg("client driver has been removed.\n");
1821 break;
1822 case OTG_STATE_B_WAIT_ACON:
1823 langwell_otg_del_timer(b_ase0_brst_tmr);
1824 langwell_otg_HAAR(0);
1825 if (langwell->host_ops)
1826 langwell->host_ops->remove(pdev);
1827 else
1828 otg_dbg("host driver has been removed.\n");
1829 langwell->hsm.b_bus_req = 0;
1830 langwell->otg.state = OTG_STATE_B_IDLE;
1831 transceiver_suspend(pdev);
1832 break;
1833 default:
1834 otg_dbg("error state before suspend\n ");
1835 break;
1836 }
1837 spin_unlock(&langwell->wq_lock);
1838
1839 return ret;
1840}
1841
1842static void transceiver_resume(struct pci_dev *pdev)
1843{
1844 pci_restore_state(pdev);
1845 pci_set_power_state(pdev, PCI_D0);
1846 langwell_otg_phy_low_power(0);
1847}
1848
1849static int langwell_otg_resume(struct pci_dev *pdev)
1850{
1851 int ret = 0;
1852 struct langwell_otg *langwell;
1853
1854 langwell = the_transceiver;
1855
1856 spin_lock(&langwell->wq_lock);
1857
1858 switch (langwell->otg.state) {
1859 case OTG_STATE_A_IDLE:
1860 case OTG_STATE_B_IDLE:
1861 case OTG_STATE_A_WAIT_VFALL:
1862 case OTG_STATE_A_VBUS_ERR:
1863 transceiver_resume(pdev);
1864 break;
1865 case OTG_STATE_A_WAIT_BCON:
1866 langwell_otg_add_timer(a_wait_bcon_tmr);
1867 langwell_otg_drv_vbus(1);
1868 if (langwell->host_ops)
1869 ret = langwell->host_ops->resume(pdev);
1870 break;
1871 case OTG_STATE_A_HOST:
1872 langwell_otg_drv_vbus(1);
1873 langwell_otg_phy_low_power(0);
1874 if (langwell->host_ops)
1875 ret = langwell->host_ops->resume(pdev);
1876 break;
1877 case OTG_STATE_B_PERIPHERAL:
1878 if (langwell->client_ops)
1879 ret = langwell->client_ops->resume(pdev);
1880 else
1881 otg_dbg("client driver not loaded.\n");
1882 break;
1883 default:
1884 otg_dbg("error state before suspend\n ");
1885 break;
1886 }
1887
1888 if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
1889 driver_name, the_transceiver) != 0) {
1890 otg_dbg("request interrupt %d failed\n", pdev->irq);
1891 ret = -EBUSY;
1892 }
1893
1894 /* enable OTG interrupts */
1895 langwell_otg_intr(1);
1896
1897 spin_unlock(&langwell->wq_lock);
1898
1899 queue_work(langwell->qwork, &langwell->work);
1900
1901
1902 return ret;
1903}
1904
1905static int __init langwell_otg_init(void)
1906{
1907 return pci_register_driver(&otg_pci_driver);
1908}
1909module_init(langwell_otg_init);
1910
1911static void __exit langwell_otg_cleanup(void)
1912{
1913 pci_unregister_driver(&otg_pci_driver);
1914}
1915module_exit(langwell_otg_cleanup);
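For completeness, a minimal user-space sketch of poking the "inputs" sysfs group created by this driver; the PCI device path below is a placeholder (the real path depends on the OTG controller's bus address), everything else follows from the attributes defined above:

	/* user-space example, not part of the kernel driver */
	#include <stdio.h>

	int main(void)
	{
		/* <BDF> stands for the OTG controller's PCI address */
		FILE *f = fopen("/sys/bus/pci/devices/<BDF>/inputs/a_bus_req", "w");

		if (!f) {
			perror("a_bus_req");
			return 1;
		}
		/* mirrors set_a_bus_req(): "1" requests the bus as A-device */
		fputs("1", f);
		fclose(f);
		return 0;
	}

The read-only "registers" and "hsm" attributes created in langwell_otg_probe() can be dumped the same way with a plain read.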
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index 9ed5ea568679..af456b48985f 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -53,6 +53,7 @@ EXPORT_SYMBOL(usb_nop_xceiv_register);
53void usb_nop_xceiv_unregister(void) 53void usb_nop_xceiv_unregister(void)
54{ 54{
55 platform_device_unregister(pd); 55 platform_device_unregister(pd);
56 pd = NULL;
56} 57}
57EXPORT_SYMBOL(usb_nop_xceiv_unregister); 58EXPORT_SYMBOL(usb_nop_xceiv_unregister);
58 59
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 247b61bfb7f4..0e4f2e41ace5 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -169,9 +169,11 @@ static int usb_console_setup(struct console *co, char *options)
169 kfree(tty); 169 kfree(tty);
170 } 170 }
171 } 171 }
172 /* So we know not to kill the hardware on a hangup on this 172 /* Now that any required fake tty operations are completed restore
173 port. We have also bumped the use count by one so it won't go 173 * the tty port count */
174 idle */ 174 --port->port.count;
175 /* The console is special in terms of closing the device so
176 * indicate this port is now acting as a system console. */
175 port->console = 1; 177 port->console = 1;
176 retval = 0; 178 retval = 0;
177 179
@@ -204,7 +206,7 @@ static void usb_console_write(struct console *co,
204 206
205 dbg("%s - port %d, %d byte(s)", __func__, port->number, count); 207 dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
206 208
207 if (!port->port.count) { 209 if (!port->console) {
208 dbg("%s - port not opened", __func__); 210 dbg("%s - port not opened", __func__);
209 return; 211 return;
210 } 212 }
@@ -300,8 +302,7 @@ void usb_serial_console_exit(void)
300{ 302{
301 if (usbcons_info.port) { 303 if (usbcons_info.port) {
302 unregister_console(&usbcons); 304 unregister_console(&usbcons);
303 if (usbcons_info.port->port.count) 305 usbcons_info.port->console = 0;
304 usbcons_info.port->port.count--;
305 usbcons_info.port = NULL; 306 usbcons_info.port = NULL;
306 } 307 }
307} 308}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2b9eeda62bfe..985cbcf48bda 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -67,6 +67,8 @@ static struct usb_device_id id_table [] = {
67 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ 67 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
68 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ 68 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
69 { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */ 69 { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */
70 { USB_DEVICE(0x10C4, 0x1101) }, /* Arkham Technology DS101 Bus Monitor */
71 { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
70 { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ 72 { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
71 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ 73 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
72 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ 74 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
@@ -78,6 +80,7 @@ static struct usb_device_id id_table [] = {
78 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ 80 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
79 { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ 81 { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
80 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ 82 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
83 { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
81 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ 84 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
82 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ 85 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
83 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ 86 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
@@ -94,7 +97,9 @@ static struct usb_device_id id_table [] = {
94 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ 97 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
95 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ 98 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
96 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 99 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
100 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
97 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ 101 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
102 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
98 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ 103 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
99 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 104 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
100 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 105 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 9734085fd2fe..59adfe123110 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1228,8 +1228,8 @@ static void cypress_read_int_callback(struct urb *urb)
1228 /* precursor to disconnect so just go away */ 1228 /* precursor to disconnect so just go away */
1229 return; 1229 return;
1230 case -EPIPE: 1230 case -EPIPE:
1231 usb_clear_halt(port->serial->dev, 0x81); 1231 /* Can't call usb_clear_halt while in_interrupt */
1232 break; 1232 /* FALLS THROUGH */
1233 default: 1233 default:
1234 /* something ugly is going on... */ 1234 /* something ugly is going on... */
1235 dev_err(&urb->dev->dev, 1235 dev_err(&urb->dev->dev,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3dc3768ca71c..8fec5d4455c9 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -33,6 +33,7 @@
33#include <linux/errno.h> 33#include <linux/errno.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/smp_lock.h>
36#include <linux/tty.h> 37#include <linux/tty.h>
37#include <linux/tty_driver.h> 38#include <linux/tty_driver.h>
38#include <linux/tty_flip.h> 39#include <linux/tty_flip.h>
@@ -107,6 +108,7 @@ struct ftdi_sio_quirk {
107 108
108static int ftdi_jtag_probe(struct usb_serial *serial); 109static int ftdi_jtag_probe(struct usb_serial *serial);
109static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); 110static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
111static int ftdi_NDI_device_setup(struct usb_serial *serial);
110static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); 112static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
111static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); 113static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
112 114
@@ -118,6 +120,10 @@ static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = {
118 .probe = ftdi_mtxorb_hack_setup, 120 .probe = ftdi_mtxorb_hack_setup,
119}; 121};
120 122
123static struct ftdi_sio_quirk ftdi_NDI_device_quirk = {
124 .probe = ftdi_NDI_device_setup,
125};
126
121static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { 127static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
122 .port_probe = ftdi_USB_UIRT_setup, 128 .port_probe = ftdi_USB_UIRT_setup,
123}; 129};
@@ -191,6 +197,7 @@ static struct usb_device_id id_table_combined [] = {
191 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, 197 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) },
192 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, 198 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
193 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, 199 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
200 { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
194 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, 201 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
195 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, 202 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
196 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, 203 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
@@ -579,6 +586,9 @@ static struct usb_device_id id_table_combined [] = {
579 { USB_DEVICE(FTDI_VID, FTDI_CCSICDU20_0_PID) }, 586 { USB_DEVICE(FTDI_VID, FTDI_CCSICDU20_0_PID) },
580 { USB_DEVICE(FTDI_VID, FTDI_CCSICDU40_1_PID) }, 587 { USB_DEVICE(FTDI_VID, FTDI_CCSICDU40_1_PID) },
581 { USB_DEVICE(FTDI_VID, FTDI_CCSMACHX_2_PID) }, 588 { USB_DEVICE(FTDI_VID, FTDI_CCSMACHX_2_PID) },
589 { USB_DEVICE(FTDI_VID, FTDI_CCSLOAD_N_GO_3_PID) },
590 { USB_DEVICE(FTDI_VID, FTDI_CCSICDU64_4_PID) },
591 { USB_DEVICE(FTDI_VID, FTDI_CCSPRIME8_5_PID) },
582 { USB_DEVICE(FTDI_VID, INSIDE_ACCESSO) }, 592 { USB_DEVICE(FTDI_VID, INSIDE_ACCESSO) },
583 { USB_DEVICE(INTREPID_VID, INTREPID_VALUECAN_PID) }, 593 { USB_DEVICE(INTREPID_VID, INTREPID_VALUECAN_PID) },
584 { USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) }, 594 { USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) },
@@ -644,6 +654,16 @@ static struct usb_device_id id_table_combined [] = {
644 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, 654 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
645 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13U_PID) }, 655 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13U_PID) },
646 { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, 656 { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
657 { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID),
658 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
659 { USB_DEVICE(FTDI_VID, FTDI_NDI_SPECTRA_SCU_PID),
660 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
661 { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_2_PID),
662 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
663 { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_3_PID),
664 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
665 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
666 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
647 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 667 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
648 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 668 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
649 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, 669 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
@@ -660,6 +680,8 @@ static struct usb_device_id id_table_combined [] = {
660 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 680 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
661 { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID), 681 { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
662 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 682 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
683 { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
684 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
663 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 685 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
664 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 686 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
665 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, 687 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
@@ -667,7 +689,6 @@ static struct usb_device_id id_table_combined [] = {
667 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 689 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
668 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, 690 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
669 { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, 691 { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) },
670 { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) },
671 { USB_DEVICE(ATMEL_VID, STK541_PID) }, 692 { USB_DEVICE(ATMEL_VID, STK541_PID) },
672 { USB_DEVICE(DE_VID, STB_PID) }, 693 { USB_DEVICE(DE_VID, STB_PID) },
673 { USB_DEVICE(DE_VID, WHT_PID) }, 694 { USB_DEVICE(DE_VID, WHT_PID) },
@@ -677,6 +698,10 @@ static struct usb_device_id id_table_combined [] = {
677 { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), 698 { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
678 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 699 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
679 { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, 700 { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
701 { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
702 { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
703 { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
704 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
680 { }, /* Optional parameter entry */ 705 { }, /* Optional parameter entry */
681 { } /* Terminating entry */ 706 { } /* Terminating entry */
682}; 707};
@@ -1023,6 +1048,16 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
1023 case FT2232C: /* FT2232C chip */ 1048 case FT2232C: /* FT2232C chip */
1024 case FT232RL: 1049 case FT232RL:
1025 if (baud <= 3000000) { 1050 if (baud <= 3000000) {
1051 __u16 product_id = le16_to_cpu(
1052 port->serial->dev->descriptor.idProduct);
1053 if (((FTDI_NDI_HUC_PID == product_id) ||
1054 (FTDI_NDI_SPECTRA_SCU_PID == product_id) ||
1055 (FTDI_NDI_FUTURE_2_PID == product_id) ||
1056 (FTDI_NDI_FUTURE_3_PID == product_id) ||
1057 (FTDI_NDI_AURORA_SCU_PID == product_id)) &&
1058 (baud == 19200)) {
1059 baud = 1200000;
1060 }
1026 div_value = ftdi_232bm_baud_to_divisor(baud); 1061 div_value = ftdi_232bm_baud_to_divisor(baud);
1027 } else { 1062 } else {
1028 dbg("%s - Baud rate too high!", __func__); 1063 dbg("%s - Baud rate too high!", __func__);
@@ -1554,6 +1589,39 @@ static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv)
1554} /* ftdi_HE_TIRA1_setup */ 1589} /* ftdi_HE_TIRA1_setup */
1555 1590
1556/* 1591/*
1592 * Module parameter to control latency timer for NDI FTDI-based USB devices.
1593 * If this value is not set in modprobe.conf.local its value will be set to 1ms.
1594 */
1595static int ndi_latency_timer = 1;
1596
1597/* Setup for the NDI FTDI-based USB devices, which requires hardwired
1598 * baudrate (19200 gets mapped to 1200000).
1599 *
1600 * Called from usbserial:serial_probe.
1601 */
1602static int ftdi_NDI_device_setup(struct usb_serial *serial)
1603{
1604 struct usb_device *udev = serial->dev;
1605 int latency = ndi_latency_timer;
1606 int rv = 0;
1607 char buf[1];
1608
1609 if (latency == 0)
1610 latency = 1;
1611 if (latency > 99)
1612 latency = 99;
1613
1614 dbg("%s setting NDI device latency to %d", __func__, latency);
1615 dev_info(&udev->dev, "NDI device with a latency value of %d", latency);
1616
1617 rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1618 FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
1619 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
1620 latency, 0, buf, 0, WDR_TIMEOUT);
1621 return 0;
1622}
1623
1624/*
1557 * First port on JTAG adaptors such as Olimex arm-usb-ocd or the FIC/OpenMoko 1625 * First port on JTAG adaptors such as Olimex arm-usb-ocd or the FIC/OpenMoko
1558 * Neo1973 Debug Board is reserved for JTAG interface and can be accessed from 1626 * Neo1973 Debug Board is reserved for JTAG interface and can be accessed from
1559 * userspace using openocd. 1627 * userspace using openocd.
@@ -2121,7 +2189,7 @@ static void ftdi_process_read(struct work_struct *work)
2121 /* Note that the error flag is duplicated for 2189 /* Note that the error flag is duplicated for
2122 every character received since we don't know 2190 every character received since we don't know
2123 which character it applied to */ 2191 which character it applied to */
2124 if (!usb_serial_handle_sysrq_char(port, 2192 if (!usb_serial_handle_sysrq_char(tty, port,
2125 data[packet_offset + i])) 2193 data[packet_offset + i]))
2126 tty_insert_flip_char(tty, 2194 tty_insert_flip_char(tty,
2127 data[packet_offset + i], 2195 data[packet_offset + i],
@@ -2622,3 +2690,5 @@ MODULE_PARM_DESC(vendor, "User specified vendor ID (default="
2622module_param(product, ushort, 0); 2690module_param(product, ushort, 0);
2623MODULE_PARM_DESC(product, "User specified product ID"); 2691MODULE_PARM_DESC(product, "User specified product ID");
2624 2692
2693module_param(ndi_latency_timer, int, S_IRUGO | S_IWUSR);
2694MODULE_PARM_DESC(ndi_latency_timer, "NDI device latency timer override");
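Note that since ndi_latency_timer is declared with S_IRUGO | S_IWUSR, it can be set at load time (for example an "options ftdi_sio ndi_latency_timer=5" line in a modprobe configuration file) or changed afterwards through /sys/module/ftdi_sio/parameters/ndi_latency_timer; the value 5 is only an example, and values of 0 or above 99 are adjusted to 1 and 99 respectively in ftdi_NDI_device_setup().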
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index f1d440a728a3..8c92b88166ae 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -506,6 +506,7 @@
506 * 506 *
507 * Armin Laeuger originally sent the PID for the UM 100 module. 507 * Armin Laeuger originally sent the PID for the UM 100 module.
508 */ 508 */
509#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG */
509#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */ 510#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */
510#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */ 511#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */
511#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */ 512#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */
@@ -614,6 +615,9 @@
614#define FTDI_CCSICDU20_0_PID 0xF9D0 615#define FTDI_CCSICDU20_0_PID 0xF9D0
615#define FTDI_CCSICDU40_1_PID 0xF9D1 616#define FTDI_CCSICDU40_1_PID 0xF9D1
616#define FTDI_CCSMACHX_2_PID 0xF9D2 617#define FTDI_CCSMACHX_2_PID 0xF9D2
618#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
619#define FTDI_CCSICDU64_4_PID 0xF9D4
620#define FTDI_CCSPRIME8_5_PID 0xF9D5
617 621
618/* Inside Accesso contactless reader (http://www.insidefr.com) */ 622/* Inside Accesso contactless reader (http://www.insidefr.com) */
619#define INSIDE_ACCESSO 0xFAD0 623#define INSIDE_ACCESSO 0xFAD0
@@ -736,6 +740,15 @@
736#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */ 740#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */
737 741
738/* 742/*
743 * NDI (www.ndigital.com) product ids
744 */
745#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */
746#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */
747#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */
748#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
749#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
750
751/*
739 * Posiflex inc retail equipment (http://www.posiflex.com.tw) 752 * Posiflex inc retail equipment (http://www.posiflex.com.tw)
740 */ 753 */
741#define POSIFLEX_VID 0x0d3a /* Vendor ID */ 754#define POSIFLEX_VID 0x0d3a /* Vendor ID */
@@ -848,9 +861,6 @@
848#define TML_VID 0x1B91 /* Vendor ID */ 861#define TML_VID 0x1B91 /* Vendor ID */
849#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ 862#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
850 863
851/* NDI Polaris System */
852#define FTDI_NDI_HUC_PID 0xDA70
853
854/* Propox devices */ 864/* Propox devices */
855#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 865#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
856 866
@@ -934,6 +944,29 @@
934#define MARVELL_VID 0x9e88 944#define MARVELL_VID 0x9e88
935#define MARVELL_SHEEVAPLUG_PID 0x9e8f 945#define MARVELL_SHEEVAPLUG_PID 0x9e8f
936 946
947#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
948
949/*
950 * GN Otometrics (http://www.otometrics.com)
951 * Submitted by Ville Sundberg.
952 */
953#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
954#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
955
956/*
957 * Bayer Ascensia Contour blood glucose meter USB-converter cable.
958 * http://winglucofacts.com/cables/
959 */
960#define BAYER_VID 0x1A79
961#define BAYER_CONTOUR_CABLE_PID 0x6001
962
963/*
964 * Marvell OpenRD Base, Client
965 * http://www.open-rd.org
966 * OpenRD Base, Client use VID 0x0403
967 */
968#define MARVELL_OPENRD_PID 0x9e90
969
937/* 970/*
938 * BmRequestType: 1100 0000b 971 * BmRequestType: 1100 0000b
939 * bRequest: FTDI_E2_READ 972 * bRequest: FTDI_E2_READ
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 932d6241b787..ce57f6a32bdf 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -424,10 +424,17 @@ static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
424 if (!tty) 424 if (!tty)
425 goto done; 425 goto done;
426 426
427 /* Push data to tty */ 427 /* The per character mucking around with sysrq path is too slow for
428 for (i = 0; i < urb->actual_length; i++, ch++) { 428 stuff like 3G modems, so shortcircuit it in the 99.9999999% of cases
429 if (!usb_serial_handle_sysrq_char(port, *ch)) 429 where the USB serial is not a console anyway */
430 tty_insert_flip_char(tty, *ch, TTY_NORMAL); 430 if (!port->console || !port->sysrq)
431 tty_insert_flip_string(tty, ch, urb->actual_length);
432 else {
433 /* Push data to tty */
434 for (i = 0; i < urb->actual_length; i++, ch++) {
435 if (!usb_serial_handle_sysrq_char(tty, port, *ch))
436 tty_insert_flip_char(tty, *ch, TTY_NORMAL);
437 }
431 } 438 }
432 tty_flip_buffer_push(tty); 439 tty_flip_buffer_push(tty);
433 tty_kref_put(tty); 440 tty_kref_put(tty);
@@ -527,11 +534,12 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
527 } 534 }
528} 535}
529 536
530int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) 537int usb_serial_handle_sysrq_char(struct tty_struct *tty,
538 struct usb_serial_port *port, unsigned int ch)
531{ 539{
532 if (port->sysrq && port->console) { 540 if (port->sysrq && port->console) {
533 if (ch && time_before(jiffies, port->sysrq)) { 541 if (ch && time_before(jiffies, port->sysrq)) {
534 handle_sysrq(ch, tty_port_tty_get(&port->port)); 542 handle_sysrq(ch, tty);
535 port->sysrq = 0; 543 port->sysrq = 0;
536 return 1; 544 return 1;
537 } 545 }
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index bfc5ce000ef9..ccd4dd340d2c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -521,7 +521,7 @@ static int mos7720_chars_in_buffer(struct tty_struct *tty)
521 mos7720_port = usb_get_serial_port_data(port); 521 mos7720_port = usb_get_serial_port_data(port);
522 if (mos7720_port == NULL) { 522 if (mos7720_port == NULL) {
523 dbg("%s:leaving ...........", __func__); 523 dbg("%s:leaving ...........", __func__);
524 return -ENODEV; 524 return 0;
525 } 525 }
526 526
527 for (i = 0; i < NUM_URBS; ++i) { 527 for (i = 0; i < NUM_URBS; ++i) {
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index c40f95c1951c..270009afdf77 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -26,6 +26,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
@@ -123,10 +124,13 @@
 #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
 #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
 
-/* This driver also supports the ATEN UC2324 device since it is mos7840 based
- * - if I knew the device id it would also support the ATEN UC2322 */
+/* This driver also supports
+ * ATEN UC2324 device using Moschip MCS7840
+ * ATEN UC2322 device using Moschip MCS7820
+ */
 #define USB_VENDOR_ID_ATENINTL 0x0557
 #define ATENINTL_DEVICE_ID_UC2324 0x2011
+#define ATENINTL_DEVICE_ID_UC2322 0x7820
 
 /* Interrupt Routine Defines */
 
@@ -176,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = {
 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 {} /* terminating entry */
 };
 
@@ -185,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 {} /* terminating entry */
 };
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 575816e6ba37..c784ddbe7b61 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file);
 static int option_tiocmset(struct tty_struct *tty, struct file *file,
 unsigned int set, unsigned int clear);
 static int option_send_setup(struct usb_serial_port *port);
+#ifdef CONFIG_PM
 static int option_suspend(struct usb_serial *serial, pm_message_t message);
 static int option_resume(struct usb_serial *serial);
+#endif
 
 /* Vendor and product IDs */
 #define OPTION_VENDOR_ID 0x0AF0
@@ -205,7 +207,9 @@ static int option_resume(struct usb_serial *serial);
 #define NOVATELWIRELESS_PRODUCT_MC727 0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
 #define NOVATELWIRELESS_PRODUCT_U727 0x5010
+#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
 #define NOVATELWIRELESS_PRODUCT_MC760 0x6000
+#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
 
 /* FUTURE NOVATEL PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
@@ -258,11 +262,6 @@ static int option_resume(struct usb_serial *serial);
 #define AXESSTEL_VENDOR_ID 0x1726
 #define AXESSTEL_PRODUCT_MV110H 0x1000
 
-#define ONDA_VENDOR_ID 0x19d2
-#define ONDA_PRODUCT_MSA501HS 0x0001
-#define ONDA_PRODUCT_ET502HS 0x0002
-#define ONDA_PRODUCT_MT503HS 0x2000
-
 #define BANDRICH_VENDOR_ID 0x1A8D
 #define BANDRICH_PRODUCT_C100_1 0x1002
 #define BANDRICH_PRODUCT_C100_2 0x1003
@@ -300,6 +299,7 @@ static int option_resume(struct usb_serial *serial);
 #define ZTE_PRODUCT_MF628 0x0015
 #define ZTE_PRODUCT_MF626 0x0031
 #define ZTE_PRODUCT_CDMA_TECH 0xfffe
+#define ZTE_PRODUCT_AC8710 0xfff1
 
 #define BENQ_VENDOR_ID 0x04a5
 #define BENQ_PRODUCT_H10 0x4068
@@ -307,11 +307,25 @@ static int option_resume(struct usb_serial *serial);
 #define DLINK_VENDOR_ID 0x1186
 #define DLINK_PRODUCT_DWM_652 0x3e04
 
+#define QISDA_VENDOR_ID 0x1da5
+#define QISDA_PRODUCT_H21_4512 0x4512
+#define QISDA_PRODUCT_H21_4523 0x4523
+#define QISDA_PRODUCT_H20_4515 0x4515
+#define QISDA_PRODUCT_H20_4519 0x4519
+
 
 /* TOSHIBA PRODUCTS */
 #define TOSHIBA_VENDOR_ID 0x0930
 #define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302
 
+#define ALINK_VENDOR_ID 0x1e0e
+#define ALINK_PRODUCT_3GU 0x9200
+
+/* ALCATEL PRODUCTS */
+#define ALCATEL_VENDOR_ID 0x1bbb
+#define ALCATEL_PRODUCT_X060S 0x0000
+
+
 static struct usb_device_id option_ids[] = {
 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -428,8 +442,10 @@ static struct usb_device_id option_ids[] = {
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
+{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
+{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
@@ -463,42 +479,6 @@ static struct usb_device_id option_ids[] = {
 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
 { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
-{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
-{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0003) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0004) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0005) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0006) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0007) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0008) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0009) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000a) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000b) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000c) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000d) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000e) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000f) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0010) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0011) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0012) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0013) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0014) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0015) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0016) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0017) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0018) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0019) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0020) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0021) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0022) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0023) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0024) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0025) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0026) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0027) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0028) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0029) },
-{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) },
 { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
@@ -523,14 +503,85 @@ static struct usb_device_id option_ids[] = {
 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
-{ USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
+{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
+{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
+{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
+{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
+{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
 { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -539,8 +590,10 @@ static struct usb_driver option_driver = {
 .name = "option",
 .probe = usb_serial_probe,
 .disconnect = usb_serial_disconnect,
+#ifdef CONFIG_PM
 .suspend = usb_serial_suspend,
 .resume = usb_serial_resume,
+#endif
 .id_table = option_ids,
 .no_dynamic_id = 1,
 };
@@ -572,8 +625,10 @@ static struct usb_serial_driver option_1port_device = {
 .disconnect = option_disconnect,
 .release = option_release,
 .read_int_callback = option_instat_callback,
+#ifdef CONFIG_PM
 .suspend = option_suspend,
 .resume = option_resume,
+#endif
 };
 
 static int debug;
@@ -732,7 +787,6 @@ static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
 memcpy(this_urb->transfer_buffer, buf, todo);
 this_urb->transfer_buffer_length = todo;
 
-this_urb->dev = port->serial->dev;
 err = usb_submit_urb(this_urb, GFP_ATOMIC);
 if (err) {
 dbg("usb_submit_urb %p (write bulk) failed "
@@ -816,7 +870,6 @@ static void option_instat_callback(struct urb *urb)
 int status = urb->status;
 struct usb_serial_port *port = urb->context;
 struct option_port_private *portdata = usb_get_serial_port_data(port);
-struct usb_serial *serial = port->serial;
 
 dbg("%s", __func__);
 dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -860,7 +913,6 @@ static void option_instat_callback(struct urb *urb)
 
 /* Resubmit urb so we continue receiving IRQ data */
 if (status != -ESHUTDOWN && status != -ENOENT) {
-urb->dev = serial->dev;
 err = usb_submit_urb(urb, GFP_ATOMIC);
 if (err)
 dbg("%s: resubmit intr urb failed. (%d)",
@@ -913,7 +965,6 @@ static int option_open(struct tty_struct *tty,
 struct usb_serial_port *port, struct file *filp)
 {
 struct option_port_private *portdata;
-struct usb_serial *serial = port->serial;
 int i, err;
 struct urb *urb;
 
@@ -921,23 +972,11 @@ static int option_open(struct tty_struct *tty,
 
 dbg("%s", __func__);
 
-/* Reset low level data toggle and start reading from endpoints */
+/* Start reading from the IN endpoint */
 for (i = 0; i < N_IN_URB; i++) {
 urb = portdata->in_urbs[i];
 if (!urb)
 continue;
-if (urb->dev != serial->dev) {
-dbg("%s: dev %p != %p", __func__,
-urb->dev, serial->dev);
-continue;
-}
-
-/*
- * make sure endpoint data toggle is synchronized with the
- * device
- */
-usb_clear_halt(urb->dev, urb->pipe);
-
 err = usb_submit_urb(urb, GFP_KERNEL);
 if (err) {
 dbg("%s: submit urb %d failed (%d) %d",
@@ -946,16 +985,6 @@ static int option_open(struct tty_struct *tty,
 }
 }
 
-/* Reset low level data toggle on out endpoints */
-for (i = 0; i < N_OUT_URB; i++) {
-urb = portdata->out_urbs[i];
-if (!urb)
-continue;
-urb->dev = serial->dev;
-/* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
- usb_pipeout(urb->pipe), 0); */
-}
-
 option_send_setup(port);
 
 return 0;
@@ -1195,6 +1224,7 @@ static void option_release(struct usb_serial *serial)
 }
 }
 
+#ifdef CONFIG_PM
 static int option_suspend(struct usb_serial *serial, pm_message_t message)
 {
 dbg("%s entered", __func__);
@@ -1218,7 +1248,6 @@ static int option_resume(struct usb_serial *serial)
 dbg("%s: No interrupt URB for port %d\n", __func__, i);
 continue;
 }
-port->interrupt_in_urb->dev = serial->dev;
 err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
 dbg("Submitted interrupt URB for port %d (result %d)", i, err);
 if (err < 0) {
@@ -1254,6 +1283,7 @@ static int option_resume(struct usb_serial *serial)
 }
 return 0;
 }
+#endif
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ec6c132a25b5..3e86815b2705 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -94,6 +94,8 @@ static struct usb_device_id id_table [] = {
 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
 { } /* Terminating entry */
 };
 
@@ -971,18 +973,46 @@ exit:
 __func__, retval);
 }
 
+static void pl2303_push_data(struct tty_struct *tty,
+struct usb_serial_port *port, struct urb *urb,
+u8 line_status)
+{
+unsigned char *data = urb->transfer_buffer;
+/* get tty_flag from status */
+char tty_flag = TTY_NORMAL;
+/* break takes precedence over parity, */
+/* which takes precedence over framing errors */
+if (line_status & UART_BREAK_ERROR)
+tty_flag = TTY_BREAK;
+else if (line_status & UART_PARITY_ERROR)
+tty_flag = TTY_PARITY;
+else if (line_status & UART_FRAME_ERROR)
+tty_flag = TTY_FRAME;
+dbg("%s - tty_flag = %d", __func__, tty_flag);
+
+tty_buffer_request_room(tty, urb->actual_length + 1);
+/* overrun is special, not associated with a char */
+if (line_status & UART_OVERRUN_ERROR)
+tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+if (port->console && port->sysrq) {
+int i;
+for (i = 0; i < urb->actual_length; ++i)
+if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
+tty_insert_flip_char(tty, data[i], tty_flag);
+} else
+tty_insert_flip_string(tty, data, urb->actual_length);
+tty_flip_buffer_push(tty);
+}
+
 static void pl2303_read_bulk_callback(struct urb *urb)
 {
 struct usb_serial_port *port = urb->context;
 struct pl2303_private *priv = usb_get_serial_port_data(port);
 struct tty_struct *tty;
-unsigned char *data = urb->transfer_buffer;
 unsigned long flags;
-int i;
 int result;
 int status = urb->status;
 u8 line_status;
-char tty_flag;
 
 dbg("%s - port %d", __func__, port->number);
 
@@ -1010,10 +1040,7 @@ static void pl2303_read_bulk_callback(struct urb *urb)
 }
 
 usb_serial_debug_data(debug, &port->dev, __func__,
-urb->actual_length, data);
-
-/* get tty_flag from status */
-tty_flag = TTY_NORMAL;
+urb->actual_length, urb->transfer_buffer);
 
 spin_lock_irqsave(&priv->lock, flags);
 line_status = priv->line_status;
@@ -1021,26 +1048,9 @@ static void pl2303_read_bulk_callback(struct urb *urb)
 spin_unlock_irqrestore(&priv->lock, flags);
 wake_up_interruptible(&priv->delta_msr_wait);
 
-/* break takes precedence over parity, */
-/* which takes precedence over framing errors */
-if (line_status & UART_BREAK_ERROR)
-tty_flag = TTY_BREAK;
-else if (line_status & UART_PARITY_ERROR)
-tty_flag = TTY_PARITY;
-else if (line_status & UART_FRAME_ERROR)
-tty_flag = TTY_FRAME;
-dbg("%s - tty_flag = %d", __func__, tty_flag);
-
 tty = tty_port_tty_get(&port->port);
 if (tty && urb->actual_length) {
-tty_buffer_request_room(tty, urb->actual_length + 1);
-/* overrun is special, not associated with a char */
-if (line_status & UART_OVERRUN_ERROR)
-tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-for (i = 0; i < urb->actual_length; ++i)
-if (!usb_serial_handle_sysrq_char(port, data[i]))
-tty_insert_flip_char(tty, data[i], tty_flag);
-tty_flip_buffer_push(tty);
+pl2303_push_data(tty, port, urb, line_status);
 }
 tty_kref_put(tty);
 /* Schedule the next read _if_ we are still open */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 1d7a22e3a9fd..ee9505e1dd92 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -122,3 +122,11 @@
 /* Hewlett-Packard LD220-HP POS Pole Display */
 #define HP_VENDOR_ID 0x03f0
 #define HP_LD220_PRODUCT_ID 0x3524
+
+/* Cressi Edy (diving computer) PC interface */
+#define CRESSI_VENDOR_ID 0x04b8
+#define CRESSI_EDY_PRODUCT_ID 0x0521
+
+/* Sony, USB data cable for CMD-Jxx mobile phones */
+#define SONY_VENDOR_ID 0x054c
+#define SONY_QN3USB_PRODUCT_ID 0x0437
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 032f7aeb40a4..f48d05e0acc1 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -181,35 +181,50 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
 };
 
 static struct usb_device_id id_table [] = {
+{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
+{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
+{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
+
 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
-{ USB_DEVICE(0x03f0, 0x1b1d) }, /* HP ev2200 a.k.a MC5720 */
 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
-{ USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
 { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
+{ USB_DEVICE(0x1199, 0x0022) }, /* Sierra Wireless EM5725 */
+{ USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
+{ USB_DEVICE(0x1199, 0x0224) }, /* Sierra Wireless MC5727 */
 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
+{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
 { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
 /* Sierra Wireless C597 */
 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
-/* Sierra Wireless Device */
+/* Sierra Wireless T598 */
 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
-{ USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
-{ USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless Device */
-{ USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless Device */
+{ USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless T11 */
+{ USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless AC402 */
+{ USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless MC5728 */
+{ USB_DEVICE(0x1199, 0x0029) }, /* Sierra Wireless Device */
 
 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
-{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
+{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
+{ USB_DEVICE(0x1199, 0x6805) }, /* Sierra Wireless MC8765 */
+{ USB_DEVICE(0x1199, 0x6808) }, /* Sierra Wireless MC8755 */
+{ USB_DEVICE(0x1199, 0x6809) }, /* Sierra Wireless MC8765 */
 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
-{ USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */
+{ USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 */
 { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */
-{ USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */
+{ USB_DEVICE(0x1199, 0x6816) }, /* Sierra Wireless MC8775 */
 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
 { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
+{ USB_DEVICE(0x1199, 0x6822) }, /* Sierra Wireless AirCard 875E */
 { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
 { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+{ USB_DEVICE(0x1199, 0x6834) }, /* Sierra Wireless MC8780 */
+{ USB_DEVICE(0x1199, 0x6835) }, /* Sierra Wireless MC8781 */
+{ USB_DEVICE(0x1199, 0x6838) }, /* Sierra Wireless MC8780 */
+{ USB_DEVICE(0x1199, 0x6839) }, /* Sierra Wireless MC8781 */
 { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */
 { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
 /* Sierra Wireless MC8790, MC8791, MC8792 Composite */
@@ -227,16 +242,13 @@ static struct usb_device_id id_table [] = {
 { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
 /* Sierra Wireless C885 */
 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
-/* Sierra Wireless Device */
+/* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
-/* Sierra Wireless Device */
+/* Sierra Wireless C22/C33 */
 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)},
-/* Sierra Wireless Device */
+/* Sierra Wireless HSPA Non-Composite Device */
 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
-
-{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
-{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
-
+{ USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 },
@@ -814,7 +826,7 @@ static int sierra_startup(struct usb_serial *serial)
 return 0;
 }
 
-static void sierra_disconnect(struct usb_serial *serial)
+static void sierra_release(struct usb_serial *serial)
 {
 int i;
 struct usb_serial_port *port;
@@ -830,7 +842,6 @@ static void sierra_disconnect(struct usb_serial *serial)
 if (!portdata)
 continue;
 kfree(portdata);
-usb_set_serial_port_data(port, NULL);
 }
 }
 
@@ -853,7 +864,7 @@ static struct usb_serial_driver sierra_device = {
 .tiocmget = sierra_tiocmget,
 .tiocmset = sierra_tiocmset,
 .attach = sierra_startup,
-.disconnect = sierra_disconnect,
+.release = sierra_release,
 .read_int_callback = sierra_instat_callback,
 };
 
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 991d8232e376..3bc609fe2242 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -191,7 +191,6 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
-{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
 };
 
 static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
@@ -728,7 +727,7 @@ static int ti_write_room(struct tty_struct *tty)
 dbg("%s - port %d", __func__, port->number);
 
 if (tport == NULL)
-return -ENODEV;
+return 0;
 
 spin_lock_irqsave(&tport->tp_lock, flags);
 room = ti_buf_space_avail(tport->tp_write_buf);
@@ -749,7 +748,7 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
 dbg("%s - port %d", __func__, port->number);
 
 if (tport == NULL)
-return -ENODEV;
+return 0;
 
 spin_lock_irqsave(&tport->tp_lock, flags);
 chars = ti_buf_data_avail(tport->tp_write_buf);
@@ -1658,7 +1657,7 @@ static int ti_do_download(struct usb_device *dev, int pipe,
 u8 cs = 0;
 int done;
 struct ti_firmware_header *header;
-int status;
+int status = 0;
 int len;
 
 for (pos = sizeof(struct ti_firmware_header); pos < size; pos++)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d595aa5586a7..99188c92068b 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -21,6 +21,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
@@ -31,6 +32,7 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/uaccess.h>
+#include <linux/serial.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 #include "pl2303.h"
@@ -183,6 +185,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
 struct usb_serial_port *port;
 unsigned int portNumber;
 int retval = 0;
+int first = 0;
 
 dbg("%s", __func__);
 
@@ -220,8 +223,9 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
 tty->driver_data = port;
 tty_port_tty_set(&port->port, tty);
 
-if (port->port.count == 1) {
-
+/* If the console is attached, the device is already open */
+if (port->port.count == 1 && !port->console) {
+first = 1;
 /* lock this module before we call it
  * this may fail, which means we must bail out,
  * safe because we are called with BKL held */
@@ -244,13 +248,21 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
 if (retval)
 goto bailout_interface_put;
 mutex_unlock(&serial->disc_mutex);
+set_bit(ASYNCB_INITIALIZED, &port->port.flags);
 }
 mutex_unlock(&port->mutex);
 /* Now do the correct tty layer semantics */
 retval = tty_port_block_til_ready(&port->port, tty, filp);
-if (retval == 0)
+if (retval == 0) {
+if (!first)
+usb_serial_put(serial);
 return 0;
-
+}
+mutex_lock(&port->mutex);
+if (first == 0)
+goto bailout_mutex_unlock;
+/* Undo the initial port actions */
+mutex_lock(&serial->disc_mutex);
 bailout_interface_put:
 usb_autopm_put_interface(serial->interface);
 bailout_module_put:
@@ -333,8 +345,27 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
 {
 struct usb_serial_port *port = tty->driver_data;
 
+if (!port)
+return;
+
 dbg("%s - port %d", __func__, port->number);
 
+/* FIXME:
+ This leaves a very narrow race. Really we should do the
+ serial_do_free() on tty->shutdown(), but tty->shutdown can
+ be called from IRQ context and serial_do_free can sleep.
+
+ The right fix is probably to make the tty free (which is rare)
+ and thus tty->shutdown() occur via a work queue and simplify all
+ the drivers that use it.
+ */
+if (tty_hung_up_p(filp)) {
+/* serial_hangup already called serial_down at this point.
+ Another user may have already reopened the port but
+ serial_do_free is refcounted */
+serial_do_free(port);
+return;
+}
 
 if (tty_port_close_start(&port->port, tty, filp) == 0)
 return;
@@ -350,7 +381,8 @@ static void serial_hangup(struct tty_struct *tty)
 struct usb_serial_port *port = tty->driver_data;
 serial_do_down(port);
 tty_port_hangup(&port->port);
-serial_do_free(port);
+/* We must not free port yet - the USB serial layer depends on it's
+ continued existence */
 }
 
 static int serial_write(struct tty_struct *tty, const unsigned char *buf,
@@ -389,7 +421,6 @@ static int serial_chars_in_buffer(struct tty_struct *tty)
 struct usb_serial_port *port = tty->driver_data;
 dbg("%s = port %d", __func__, port->number);
 
-WARN_ON(!port->port.count);
 /* if the device was unplugged then any remaining characters
  fell out of the connector ;) */
 if (port->serial->disconnected)
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index d41cc0a970f7..773a5cd38c5a 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -118,6 +118,9 @@ static int option_inquiry(struct us_data *us)
 
 result = memcmp(buffer+8, "Option", 6);
 
+if (result != 0)
+result = memcmp(buffer+8, "ZCOPTION", 8);
+
 /* Read the CSW */
 usb_stor_bulk_transfer_buf(us,
 us->recv_bulk_pipe,
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcb320217218..e20dc525d177 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
 US_BULK_GET_MAX_LUN,
 USB_DIR_IN | USB_TYPE_CLASS |
 USB_RECIP_INTERFACE,
-0, us->ifnum, us->iobuf, 1, HZ);
+0, us->ifnum, us->iobuf, 1, 10*HZ);
 
 US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
 result, us->iobuf[0]);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1b9c5dd0fb27..7477d411959f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -838,6 +838,13 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001,
 US_SC_DEVICE, US_PR_DEVICE, NULL,
 US_FL_FIX_CAPACITY ),
 
+/* Reported by Rogerio Brito <rbrito@ime.usp.br> */
+UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
+"Prolific Technology, Inc.",
+"Mass Storage Device",
+US_SC_DEVICE, US_PR_DEVICE, NULL,
+US_FL_NOT_LOCKABLE ),
+
 /* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */
 /* Change to bcdDeviceMin (0x0100 to 0x0001) reported by
  * Thomas Bartosik <tbartdev@gmx-topmail.de> */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d6d65ef85f54..3b54b3940178 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -616,6 +616,8 @@ config FB_STI
 select FB_CFB_FILLRECT
 select FB_CFB_COPYAREA
 select FB_CFB_IMAGEBLIT
+select STI_CONSOLE
+select VT
 default y
 ---help---
 STI refers to the HP "Standard Text Interface" which is a set of
@@ -1117,12 +1119,13 @@ config FB_CARILLO_RANCH
 
 config FB_INTEL
 tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
-depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL
+depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EMBEDDED
 select FB_MODE_HELPERS
 select FB_CFB_FILLRECT
 select FB_CFB_COPYAREA
 select FB_CFB_IMAGEBLIT
 select FB_BOOT_VESA_SUPPORT if FB_INTEL = y
+depends on !DRM_I915
 help
 This driver supports the on-board graphics built in to the Intel
 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index fb8163d181ab..a21efcd10b78 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -226,9 +226,10 @@ static int clcdfb_set_par(struct fb_info *info)
 clcdfb_enable(fb, regs.cntl);
 
 #ifdef DEBUG
-printk(KERN_INFO "CLCD: Registers set to\n"
-KERN_INFO " %08x %08x %08x %08x\n"
-KERN_INFO " %08x %08x %08x %08x\n",
+printk(KERN_INFO
+"CLCD: Registers set to\n"
+" %08x %08x %08x %08x\n"
+" %08x %08x %08x %08x\n",
 readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1),
 readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3),
 readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS),
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 018850c116c6..8cd279be74e5 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2405,6 +2405,9 @@ static int do_fb_set_var(struct fb_var_screeninfo *var, int isactive)
 return 0;
 }
 
+/* fbhw->encode_fix() must be called with fb_info->mm_lock held
+ * if it is called after the register_framebuffer() - not a case here
+ */
 static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
 {
 struct atafb_par par;
@@ -2414,7 +2417,8 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
 if (err)
 return err;
 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
-return fbhw->encode_fix(fix, &par);
+err = fbhw->encode_fix(fix, &par);
+return err;
 }
 
 static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2743,7 +2747,9 @@ static int atafb_set_par(struct fb_info *info)
 
 /* Decode wanted screen parameters */
 fbhw->decode_var(&info->var, par);
+mutex_lock(&info->mm_lock);
 fbhw->encode_fix(&info->fix, par);
+mutex_unlock(&info->mm_lock);
 
 /* Set new videomode */
 ata_set_par(par);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5afd64482f55..da05f0801bb7 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -261,6 +261,9 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
 /**
  * atmel_lcdfb_alloc_video_memory - Allocate framebuffer memory
  * @sinfo: the frame buffer to allocate memory for
+ *
+ * This function is called only from the atmel_lcdfb_probe()
+ * so no locking by fb_info->mm_lock around smem_len setting is needed.
  */
 static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
 {
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 7691e73823d3..1f39a62f899b 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -187,6 +187,8 @@ struct atyfb_par {
 int mtrr_reg;
 #endif
 u32 mem_cntl;
+struct crtc saved_crtc;
+union aty_pll saved_pll;
 };
 
  /*
@@ -217,6 +219,7 @@ struct atyfb_par {
 #define M64F_XL_DLL 0x00080000
 #define M64F_MFB_FORCE_4 0x00100000
 #define M64F_HW_TRIPLE 0x00200000
+#define M64F_XL_MEM 0x00400000
  /*
  * Register access
  */
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1207c208a30b..63d3739d43a8 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,8 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/backlight.h>
+#include <linux/reboot.h>
+#include <linux/dmi.h>
 
 #include <asm/io.h>
 #include <linux/uaccess.h>
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info);
 static int store_video_par(char *videopar, unsigned char m64_num);
 #endif
 
-static struct crtc saved_crtc;
-static union aty_pll saved_pll;
 static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
 
 static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
 static int read_aty_sense(const struct atyfb_par *par);
 #endif
 
+static DEFINE_MUTEX(reboot_lock);
+static struct fb_info *reboot_info;
 
  /*
  * Interface used by the world
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
 #define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
 #define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
 
-#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4)
-#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS)
+#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
+#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
 
 static struct {
 u16 pci_id;
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO";
 static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
 static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
 static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
+static char ram_wram[] __devinitdata = "WRAM";
 static char ram_off[] __devinitdata = "OFF";
 #endif /* CONFIG_FB_ATY_CT */
 
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = {
553#ifdef CONFIG_FB_ATY_CT 556#ifdef CONFIG_FB_ATY_CT
554static char *aty_ct_ram[8] __devinitdata = { 557static char *aty_ct_ram[8] __devinitdata = {
555 ram_off, ram_dram, ram_edo, ram_edo, 558 ram_off, ram_dram, ram_edo, ram_edo,
559 ram_sdram, ram_sgram, ram_wram, ram_resv
560};
561static char *aty_xl_ram[8] __devinitdata = {
562 ram_off, ram_dram, ram_edo, ram_edo,
556 ram_sdram, ram_sgram, ram_sdram32, ram_resv 563 ram_sdram, ram_sgram, ram_sdram32, ram_resv
557}; 564};
558#endif /* CONFIG_FB_ATY_CT */ 565#endif /* CONFIG_FB_ATY_CT */
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
760#endif /* CONFIG_FB_ATY_GENERIC_LCD */ 767#endif /* CONFIG_FB_ATY_GENERIC_LCD */
761} 768}
762 769
770static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
771{
772 u32 line_length = vxres * bpp / 8;
773
774 if (par->ram_type == SGRAM ||
775 (!M64_HAS(XL_MEM) && par->ram_type == WRAM))
776 line_length = (line_length + 63) & ~63;
777
778 return line_length;
779}
780
763static int aty_var_to_crtc(const struct fb_info *info, 781static int aty_var_to_crtc(const struct fb_info *info,
764 const struct fb_var_screeninfo *var, struct crtc *crtc) 782 const struct fb_var_screeninfo *var, struct crtc *crtc)
765{ 783{
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
769 u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; 787 u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
770 u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; 788 u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
771 u32 pix_width, dp_pix_width, dp_chain_mask; 789 u32 pix_width, dp_pix_width, dp_chain_mask;
790 u32 line_length;
772 791
773 /* input */ 792 /* input */
774 xres = var->xres; 793 xres = (var->xres + 7) & ~7;
775 yres = var->yres; 794 yres = var->yres;
776 vxres = var->xres_virtual; 795 vxres = (var->xres_virtual + 7) & ~7;
777 vyres = var->yres_virtual; 796 vyres = var->yres_virtual;
778 xoffset = var->xoffset; 797 xoffset = (var->xoffset + 7) & ~7;
779 yoffset = var->yoffset; 798 yoffset = var->yoffset;
780 bpp = var->bits_per_pixel; 799 bpp = var->bits_per_pixel;
781 if (bpp == 16) 800 if (bpp == 16)
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
827 } else 846 } else
828 FAIL("invalid bpp"); 847 FAIL("invalid bpp");
829 848
830 if (vxres * vyres * bpp / 8 > info->fix.smem_len) 849 line_length = calc_line_length(par, vxres, bpp);
850
851 if (vyres * line_length > info->fix.smem_len)
831 FAIL("not enough video RAM"); 852 FAIL("not enough video RAM");
832 853
833 h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; 854 h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
969 crtc->xoffset = xoffset; 990 crtc->xoffset = xoffset;
970 crtc->yoffset = yoffset; 991 crtc->yoffset = yoffset;
971 crtc->bpp = bpp; 992 crtc->bpp = bpp;
972 crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19); 993 crtc->off_pitch =
994 ((yoffset * line_length + xoffset * bpp / 8) / 8) |
995 ((line_length / bpp) << 22);
973 crtc->vline_crnt_vline = 0; 996 crtc->vline_crnt_vline = 0;
974 997
975 crtc->h_tot_disp = h_total | (h_disp<<16); 998 crtc->h_tot_disp = h_total | (h_disp<<16);
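The two hunks above add calc_line_length() and derive the CRTC off/pitch word from the byte pitch instead of the virtual width: on SGRAM parts (and on WRAM when the chip lacks XL-style memory) the pitch is rounded up to a 64-byte multiple, and (line_length / bpp) looks like the pitch expressed in 8-pixel units. A throwaway user-space recheck of that arithmetic for a hypothetical 800x600 virtual mode at 24 bpp (illustration only, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t vxres = 800, bpp = 24, xoffset = 0, yoffset = 16;
        uint32_t line_length = vxres * bpp / 8;         /* 2400 bytes */

        /* SGRAM/WRAM parts round the pitch up to 64 bytes */
        line_length = (line_length + 63) & ~63;         /* 2432 bytes */

        /* same packing as the new off_pitch expression above */
        uint32_t off_pitch =
                ((yoffset * line_length + xoffset * bpp / 8) / 8) |
                ((line_length / bpp) << 22);

        printf("line_length=%u off_pitch=0x%08x\n", line_length, off_pitch);
        return 0;
}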
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info)
1394 } 1417 }
1395 aty_st_8(DAC_MASK, 0xff, par); 1418 aty_st_8(DAC_MASK, 0xff, par);
1396 1419
1397 info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8; 1420 info->fix.line_length = calc_line_length(par, var->xres_virtual,
1421 var->bits_per_pixel);
1422
1398 info->fix.visual = var->bits_per_pixel <= 8 ? 1423 info->fix.visual = var->bits_per_pixel <= 8 ?
1399 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; 1424 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
1400 1425
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
1505{ 1530{
1506 u32 xoffset = info->var.xoffset; 1531 u32 xoffset = info->var.xoffset;
1507 u32 yoffset = info->var.yoffset; 1532 u32 yoffset = info->var.yoffset;
1508 u32 vxres = par->crtc.vxres; 1533 u32 line_length = info->fix.line_length;
1509 u32 bpp = info->var.bits_per_pixel; 1534 u32 bpp = info->var.bits_per_pixel;
1510 1535
1511 par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19); 1536 par->crtc.off_pitch =
1537 ((yoffset * line_length + xoffset * bpp / 8) / 8) |
1538 ((line_length / bpp) << 22);
1512} 1539}
1513 1540
1514 1541
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
2201 const int *refresh_tbl; 2228 const int *refresh_tbl;
2202 int i, size; 2229 int i, size;
2203 2230
2204 if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { 2231 if (M64_HAS(XL_MEM)) {
2205 refresh_tbl = ragexl_tbl; 2232 refresh_tbl = ragexl_tbl;
2206 size = ARRAY_SIZE(ragexl_tbl); 2233 size = ARRAY_SIZE(ragexl_tbl);
2207 } else { 2234 } else {
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info)
2335 par->pll_ops = &aty_pll_ct; 2362 par->pll_ops = &aty_pll_ct;
2336 par->bus_type = PCI; 2363 par->bus_type = PCI;
2337 par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07); 2364 par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
2338 ramname = aty_ct_ram[par->ram_type]; 2365 if (M64_HAS(XL_MEM))
2366 ramname = aty_xl_ram[par->ram_type];
2367 else
2368 ramname = aty_ct_ram[par->ram_type];
2339 /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */ 2369 /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
2340 if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM) 2370 if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
2341 par->pll_limits.mclk = 63; 2371 par->pll_limits.mclk = 63;
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info)
2390#endif /* CONFIG_FB_ATY_CT */ 2420#endif /* CONFIG_FB_ATY_CT */
2391 2421
2392 /* save previous video mode */ 2422 /* save previous video mode */
2393 aty_get_crtc(par, &saved_crtc); 2423 aty_get_crtc(par, &par->saved_crtc);
2394 if(par->pll_ops->get_pll) 2424 if(par->pll_ops->get_pll)
2395 par->pll_ops->get_pll(info, &saved_pll); 2425 par->pll_ops->get_pll(info, &par->saved_pll);
2396 2426
2397 par->mem_cntl = aty_ld_le32(MEM_CNTL, par); 2427 par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
2398 gtb_memsize = M64_HAS(GTB_DSP); 2428 gtb_memsize = M64_HAS(GTB_DSP);
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info)
2667 2697
2668aty_init_exit: 2698aty_init_exit:
2669 /* restore video mode */ 2699 /* restore video mode */
2670 aty_set_crtc(par, &saved_crtc); 2700 aty_set_crtc(par, &par->saved_crtc);
2671 par->pll_ops->set_pll(info, &saved_pll); 2701 par->pll_ops->set_pll(info, &par->saved_pll);
2672 2702
2673#ifdef CONFIG_MTRR 2703#ifdef CONFIG_MTRR
2674 if (par->mtrr_reg >= 0) { 2704 if (par->mtrr_reg >= 0) {
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3502 par->mmap_map[1].prot_flag = _PAGE_E; 3532 par->mmap_map[1].prot_flag = _PAGE_E;
3503#endif /* __sparc__ */ 3533#endif /* __sparc__ */
3504 3534
3535 mutex_lock(&reboot_lock);
3536 if (!reboot_info)
3537 reboot_info = info;
3538 mutex_unlock(&reboot_lock);
3539
3505 return 0; 3540 return 0;
3506 3541
3507err_release_io: 3542err_release_io:
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info)
3614 struct atyfb_par *par = (struct atyfb_par *) info->par; 3649 struct atyfb_par *par = (struct atyfb_par *) info->par;
3615 3650
3616 /* restore video mode */ 3651 /* restore video mode */
3617 aty_set_crtc(par, &saved_crtc); 3652 aty_set_crtc(par, &par->saved_crtc);
3618 par->pll_ops->set_pll(info, &saved_pll); 3653 par->pll_ops->set_pll(info, &par->saved_pll);
3619 3654
3620 unregister_framebuffer(info); 3655 unregister_framebuffer(info);
3621 3656
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
3661{ 3696{
3662 struct fb_info *info = pci_get_drvdata(pdev); 3697 struct fb_info *info = pci_get_drvdata(pdev);
3663 3698
3699 mutex_lock(&reboot_lock);
3700 if (reboot_info == info)
3701 reboot_info = NULL;
3702 mutex_unlock(&reboot_lock);
3703
3664 atyfb_remove(info); 3704 atyfb_remove(info);
3665} 3705}
3666 3706
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options)
3808} 3848}
3809#endif /* MODULE */ 3849#endif /* MODULE */
3810 3850
3851static int atyfb_reboot_notify(struct notifier_block *nb,
3852 unsigned long code, void *unused)
3853{
3854 struct atyfb_par *par;
3855
3856 if (code != SYS_RESTART)
3857 return NOTIFY_DONE;
3858
3859 mutex_lock(&reboot_lock);
3860
3861 if (!reboot_info)
3862 goto out;
3863
3864 if (!lock_fb_info(reboot_info))
3865 goto out;
3866
3867 par = reboot_info->par;
3868
3869 /*
3870 * HP OmniBook 500's BIOS doesn't like the state of the
3871 * hardware after atyfb has been used. Restore the hardware
3872 * to the original state to allow successful reboots.
3873 */
3874 aty_set_crtc(par, &par->saved_crtc);
3875 par->pll_ops->set_pll(reboot_info, &par->saved_pll);
3876
3877 unlock_fb_info(reboot_info);
3878 out:
3879 mutex_unlock(&reboot_lock);
3880
3881 return NOTIFY_DONE;
3882}
3883
3884static struct notifier_block atyfb_reboot_notifier = {
3885 .notifier_call = atyfb_reboot_notify,
3886};
3887
3888static const struct dmi_system_id atyfb_reboot_ids[] = {
3889 {
3890 .ident = "HP OmniBook 500",
3891 .matches = {
3892 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3893 DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
3894 DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
3895 },
3896 },
3897
3898 { }
3899};
3900
3811static int __init atyfb_init(void) 3901static int __init atyfb_init(void)
3812{ 3902{
3813 int err1 = 1, err2 = 1; 3903 int err1 = 1, err2 = 1;
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void)
3826 err2 = atyfb_atari_probe(); 3916 err2 = atyfb_atari_probe();
3827#endif 3917#endif
3828 3918
3829 return (err1 && err2) ? -ENODEV : 0; 3919 if (err1 && err2)
3920 return -ENODEV;
3921
3922 if (dmi_check_system(atyfb_reboot_ids))
3923 register_reboot_notifier(&atyfb_reboot_notifier);
3924
3925 return 0;
3830} 3926}
3831 3927
3832static void __exit atyfb_exit(void) 3928static void __exit atyfb_exit(void)
3833{ 3929{
3930 if (dmi_check_system(atyfb_reboot_ids))
3931 unregister_reboot_notifier(&atyfb_reboot_notifier);
3932
3834#ifdef CONFIG_PCI 3933#ifdef CONFIG_PCI
3835 pci_unregister_driver(&atyfb_driver); 3934 pci_unregister_driver(&atyfb_driver);
3836#endif 3935#endif
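The tail of this file's diff registers a reboot notifier only on DMI-matched machines, so the HP OmniBook 500's BIOS gets the hardware back in its original state before reboot. Distilled into a minimal, self-contained sketch of the same register-on-DMI-match pattern (the examplefb_* names and the match strings are placeholders, not part of the patch; the actual restore step is left as a comment):

#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int examplefb_reboot_notify(struct notifier_block *nb,
                                   unsigned long code, void *unused)
{
        if (code != SYS_RESTART)
                return NOTIFY_DONE;

        /* put the hardware back into its boot-time state here */

        return NOTIFY_DONE;
}

static struct notifier_block examplefb_reboot_notifier = {
        .notifier_call = examplefb_reboot_notify,
};

static const struct dmi_system_id examplefb_reboot_ids[] = {
        {
                .ident = "Some quirky machine",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Product"),
                },
        },
        { }
};

static int __init examplefb_init(void)
{
        if (dmi_check_system(examplefb_reboot_ids))
                register_reboot_notifier(&examplefb_reboot_notifier);
        return 0;
}
module_init(examplefb_init);

static void __exit examplefb_exit(void)
{
        if (dmi_check_system(examplefb_reboot_ids))
                unregister_reboot_notifier(&examplefb_reboot_notifier);
}
module_exit(examplefb_exit);

MODULE_LICENSE("GPL");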
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 0cc9724e61a2..51fcc0a2c94a 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par)
63void aty_init_engine(struct atyfb_par *par, struct fb_info *info) 63void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
64{ 64{
65 u32 pitch_value; 65 u32 pitch_value;
66 u32 vxres;
66 67
67 /* determine modal information from global mode structure */ 68 /* determine modal information from global mode structure */
68 pitch_value = info->var.xres_virtual; 69 pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8);
70 vxres = info->var.xres_virtual;
69 71
70 if (info->var.bits_per_pixel == 24) { 72 if (info->var.bits_per_pixel == 24) {
71 /* In 24 bpp, the engine is in 8 bpp - this requires that all */ 73 /* In 24 bpp, the engine is in 8 bpp - this requires that all */
72 /* horizontal coordinates and widths must be adjusted */ 74 /* horizontal coordinates and widths must be adjusted */
73 pitch_value *= 3; 75 pitch_value *= 3;
76 vxres *= 3;
74 } 77 }
75 78
76 /* On GTC (RagePro), we need to reset the 3D engine before */ 79 /* On GTC (RagePro), we need to reset the 3D engine before */
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
133 aty_st_le32(SC_LEFT, 0, par); 136 aty_st_le32(SC_LEFT, 0, par);
134 aty_st_le32(SC_TOP, 0, par); 137 aty_st_le32(SC_TOP, 0, par);
135 aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par); 138 aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par);
136 aty_st_le32(SC_RIGHT, pitch_value - 1, par); 139 aty_st_le32(SC_RIGHT, vxres - 1, par);
137 140
138 /* set background color to minimum value (usually BLACK) */ 141 /* set background color to minimum value (usually BLACK) */
139 aty_st_le32(DP_BKGD_CLR, 0, par); 142 aty_st_le32(DP_BKGD_CLR, 0, par);
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index c3ebb6b41ce1..7aed2565c1bd 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -72,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
72 if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) { 72 if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
73 printk(KERN_INFO "bl : failed to set brightness\n"); 73 printk(KERN_INFO "bl : failed to set brightness\n");
74 ret = -ETIMEDOUT; 74 ret = -ETIMEDOUT;
75 goto out 75 goto out;
76 } 76 }
77 77
78 /* at this point we expect that the mcu has accepted 78 /* at this point we expect that the mcu has accepted
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index e641584e212e..887166267443 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -145,6 +145,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
145 struct backlight_device *bl = platform_get_drvdata(pdev); 145 struct backlight_device *bl = platform_get_drvdata(pdev);
146 struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev); 146 struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
147 147
148 if (pb->notify)
149 pb->notify(0);
148 pwm_config(pb->pwm, 0, pb->period); 150 pwm_config(pb->pwm, 0, pb->period);
149 pwm_disable(pb->pwm); 151 pwm_disable(pb->pwm);
150 return 0; 152 return 0;
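The suspend path now invokes the board's optional notify() callback with a brightness of 0 before the PWM is reconfigured and disabled, presumably mirroring how the normal brightness-update path already uses that callback. Reconstructed from the hunk plus its quoted context, the full suspend handler reads roughly as follows (only the two notify lines are new):

static int pwm_backlight_suspend(struct platform_device *pdev,
                                 pm_message_t state)
{
        struct backlight_device *bl = platform_get_drvdata(pdev);
        struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);

        if (pb->notify)
                pb->notify(0);          /* board code sees brightness go to 0 */
        pwm_config(pb->pwm, 0, pb->period);
        pwm_disable(pb->pwm);
        return 0;
}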
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 1dae7f8f3c6b..51422fc4f606 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -356,7 +356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
356 lcd->power = FB_BLANK_POWERDOWN; 356 lcd->power = FB_BLANK_POWERDOWN;
357 lcd->mode = MODE_VGA; /* default to VGA */ 357 lcd->mode = MODE_VGA; /* default to VGA */
358 358
359 lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL)); 359 lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
360 if (lcd->buf == NULL) { 360 if (lcd->buf == NULL) {
361 kfree(lcd); 361 kfree(lcd);
362 return -ENOMEM; 362 return -ENOMEM;
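A one-argument fix: the old call passed sizeof(GFP_KERNEL), i.e. the size of the flag constant, where kmalloc() expects the GFP mask itself. For reference, the corrected allocation with its error path, as copied from the hunk and its context:

/* void *kmalloc(size_t size, gfp_t flags);  -- from <linux/slab.h> */
lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
if (lcd->buf == NULL) {
        kfree(lcd);
        return -ENOMEM;
}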
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index 7bad24ed04ef..108b89e09a80 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cobalt server LCD frame buffer driver. 2 * Cobalt server LCD frame buffer driver.
3 * 3 *
4 * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 471a9a60376a..3a44695b9c09 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1082,7 +1082,6 @@ static void fbcon_init(struct vc_data *vc, int init)
1082 new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); 1082 new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
1083 new_cols /= vc->vc_font.width; 1083 new_cols /= vc->vc_font.width;
1084 new_rows /= vc->vc_font.height; 1084 new_rows /= vc->vc_font.height;
1085 vc_resize(vc, new_cols, new_rows);
1086 1085
1087 /* 1086 /*
1088 * We must always set the mode. The mode of the previous console 1087 * We must always set the mode. The mode of the previous console
@@ -1111,10 +1110,11 @@ static void fbcon_init(struct vc_data *vc, int init)
1111 * vc_{cols,rows}, but we must not set those if we are only 1110 * vc_{cols,rows}, but we must not set those if we are only
1112 * resizing the console. 1111 * resizing the console.
1113 */ 1112 */
1114 if (!init) { 1113 if (init) {
1115 vc->vc_cols = new_cols; 1114 vc->vc_cols = new_cols;
1116 vc->vc_rows = new_rows; 1115 vc->vc_rows = new_rows;
1117 } 1116 } else
1117 vc_resize(vc, new_cols, new_rows);
1118 1118
1119 if (logo) 1119 if (logo)
1120 fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); 1120 fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
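fbcon_init() no longer calls vc_resize() unconditionally: on the very first call for a console the computed geometry is written straight into vc_cols/vc_rows, and vc_resize() is reserved for the non-init case where an existing console really is being resized. The resulting control flow, compressed into a sketch (identifiers as in the hunk, surrounding logic elided):

new_cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
new_cols /= vc->vc_font.width;
new_rows /= vc->vc_font.height;

/* ... set the video mode ... */

if (init) {
        /* first call for this console: just record the geometry */
        vc->vc_cols = new_cols;
        vc->vc_rows = new_rows;
} else {
        /* later calls: go through the full console resize path */
        vc_resize(vc, new_cols, new_rows);
}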
diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/console/fbcon_rotate.h
index 75be5ce53dc5..e233444cda66 100644
--- a/drivers/video/console/fbcon_rotate.h
+++ b/drivers/video/console/fbcon_rotate.h
@@ -45,7 +45,7 @@ static inline void rotate_ud(const char *in, char *out, u32 width, u32 height)
45 width = (width + 7) & ~7; 45 width = (width + 7) & ~7;
46 46
47 for (i = 0; i < height; i++) { 47 for (i = 0; i < height; i++) {
48 for (j = 0; j < width; j++) { 48 for (j = 0; j < width - shift; j++) {
49 if (pattern_test_bit(j, i, width, in)) 49 if (pattern_test_bit(j, i, width, in))
50 pattern_set_bit(width - (1 + j + shift), 50 pattern_set_bit(width - (1 + j + shift),
51 height - (1 + i), 51 height - (1 + i),
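The tighter loop bound in rotate_ud() matters because the destination column is width - (1 + j + shift), where shift appears to be the padding introduced when width was rounded up to a byte multiple earlier in the function; once j reaches width - shift that expression goes negative, so the old bound indexed bit positions outside the output pattern. A throwaway user-space check of the boundary arithmetic (values are hypothetical, purely illustrative):

#include <stdio.h>

int main(void)
{
        unsigned int width = 16, shift = 3;     /* hypothetical values */
        unsigned int j;

        for (j = 0; j < width; j++) {
                int dst = (int)width - (1 + (int)j + (int)shift);

                printf("j=%2u dst=%3d%s\n", j, dst,
                       j < width - shift ? "" : "  <- out of range, now skipped");
        }
        return 0;
}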
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index ef7870f5ea08..857b3668b3ba 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -957,9 +957,14 @@ static int __devinit sticore_pci_init(struct pci_dev *pd,
957#ifdef CONFIG_PCI 957#ifdef CONFIG_PCI
958 unsigned long fb_base, rom_base; 958 unsigned long fb_base, rom_base;
959 unsigned int fb_len, rom_len; 959 unsigned int fb_len, rom_len;
960 int err;
960 struct sti_struct *sti; 961 struct sti_struct *sti;
961 962
962 pci_enable_device(pd); 963 err = pci_enable_device(pd);
964 if (err < 0) {
965 dev_err(&pd->dev, "Cannot enable PCI device\n");
966 return err;
967 }
963 968
964 fb_base = pci_resource_start(pd, 0); 969 fb_base = pci_resource_start(pd, 0);
965 fb_len = pci_resource_len(pd, 0); 970 fb_len = pci_resource_len(pd, 0);
@@ -1048,7 +1053,7 @@ static void __devinit sti_init_roms(void)
1048 1053
1049 /* Register drivers for native & PCI cards */ 1054 /* Register drivers for native & PCI cards */
1050 register_parisc_driver(&pa_sti_driver); 1055 register_parisc_driver(&pa_sti_driver);
1051 pci_register_driver(&pci_sti_driver); 1056 WARN_ON(pci_register_driver(&pci_sti_driver));
1052 1057
1053 /* if we didn't find the given default sti, take the first one */ 1058 /* if we didn't find the given default sti, take the first one */
1054 if (!default_sti) 1059 if (!default_sti)
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index f8a09bf8d0cd..a85c818be945 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -16,7 +16,6 @@
16#include <linux/compat.h> 16#include <linux/compat.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/smp_lock.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/major.h> 20#include <linux/major.h>
22#include <linux/slab.h> 21#include <linux/slab.h>
@@ -1310,8 +1309,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1310 1309
1311static int 1310static int
1312fb_mmap(struct file *file, struct vm_area_struct * vma) 1311fb_mmap(struct file *file, struct vm_area_struct * vma)
1313__acquires(&info->lock)
1314__releases(&info->lock)
1315{ 1312{
1316 int fbidx = iminor(file->f_path.dentry->d_inode); 1313 int fbidx = iminor(file->f_path.dentry->d_inode);
1317 struct fb_info *info = registered_fb[fbidx]; 1314 struct fb_info *info = registered_fb[fbidx];
@@ -1325,16 +1322,14 @@ __releases(&info->lock)
1325 off = vma->vm_pgoff << PAGE_SHIFT; 1322 off = vma->vm_pgoff << PAGE_SHIFT;
1326 if (!fb) 1323 if (!fb)
1327 return -ENODEV; 1324 return -ENODEV;
1325 mutex_lock(&info->mm_lock);
1328 if (fb->fb_mmap) { 1326 if (fb->fb_mmap) {
1329 int res; 1327 int res;
1330 mutex_lock(&info->lock);
1331 res = fb->fb_mmap(info, vma); 1328 res = fb->fb_mmap(info, vma);
1332 mutex_unlock(&info->lock); 1329 mutex_unlock(&info->mm_lock);
1333 return res; 1330 return res;
1334 } 1331 }
1335 1332
1336 mutex_lock(&info->lock);
1337
1338 /* frame buffer memory */ 1333 /* frame buffer memory */
1339 start = info->fix.smem_start; 1334 start = info->fix.smem_start;
1340 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); 1335 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
@@ -1342,13 +1337,13 @@ __releases(&info->lock)
1342 /* memory mapped io */ 1337 /* memory mapped io */
1343 off -= len; 1338 off -= len;
1344 if (info->var.accel_flags) { 1339 if (info->var.accel_flags) {
1345 mutex_unlock(&info->lock); 1340 mutex_unlock(&info->mm_lock);
1346 return -EINVAL; 1341 return -EINVAL;
1347 } 1342 }
1348 start = info->fix.mmio_start; 1343 start = info->fix.mmio_start;
1349 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); 1344 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
1350 } 1345 }
1351 mutex_unlock(&info->lock); 1346 mutex_unlock(&info->mm_lock);
1352 start &= PAGE_MASK; 1347 start &= PAGE_MASK;
1353 if ((vma->vm_end - vma->vm_start + off) > len) 1348 if ((vma->vm_end - vma->vm_start + off) > len)
1354 return -EINVAL; 1349 return -EINVAL;
@@ -1518,6 +1513,7 @@ register_framebuffer(struct fb_info *fb_info)
1518 break; 1513 break;
1519 fb_info->node = i; 1514 fb_info->node = i;
1520 mutex_init(&fb_info->lock); 1515 mutex_init(&fb_info->lock);
1516 mutex_init(&fb_info->mm_lock);
1521 1517
1522 fb_info->dev = device_create(fb_class, fb_info->device, 1518 fb_info->dev = device_create(fb_class, fb_info->device,
1523 MKDEV(FB_MAJOR, i), NULL, "fb%d", i); 1519 MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
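This is the core of the locking change that most of the driver hunks below follow up on: fb_mmap() stops taking the broad info->lock (the __acquires/__releases annotations go with it) and instead reads the screen and mmio ranges under a new, narrower info->mm_lock, which register_framebuffer() now initialises. The driver-side half of the contract is that anyone rewriting fix.smem_start/smem_len does so under the same mutex. A minimal sketch of that convention (example_publish_fb_mem() is a hypothetical helper, not part of the patch):

#include <linux/fb.h>
#include <linux/mutex.h>

/* Publish a new framebuffer mapping so a concurrent fb_mmap() sees a
 * consistent smem_start/smem_len pair.
 */
static void example_publish_fb_mem(struct fb_info *info,
                                   unsigned long phys, u32 len)
{
        mutex_lock(&info->mm_lock);
        info->fix.smem_start = phys;
        info->fix.smem_len = len;
        mutex_unlock(&info->mm_lock);
}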
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 5c1a2c01778f..9ae9cd32bd06 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -256,8 +256,8 @@ static void fix_edid(unsigned char *edid, int fix)
256 256
257static int edid_checksum(unsigned char *edid) 257static int edid_checksum(unsigned char *edid)
258{ 258{
259 unsigned char i, csum = 0, all_null = 0; 259 unsigned char csum = 0, all_null = 0;
260 int err = 0, fix = check_edid(edid); 260 int i, err = 0, fix = check_edid(edid);
261 261
262 if (fix) 262 if (fix)
263 fix_edid(edid, fix); 263 fix_edid(edid, fix);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index f153c581cbd7..72d68b3dc478 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info)
750static int map_video_memory(struct fb_info *info) 750static int map_video_memory(struct fb_info *info)
751{ 751{
752 phys_addr_t phys; 752 phys_addr_t phys;
753 u32 smem_len = info->fix.line_length * info->var.yres_virtual;
753 754
754 pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual); 755 pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
755 pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual); 756 pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
756 pr_debug("info->fix.line_length = %d\n", info->fix.line_length); 757 pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
758 pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
757 759
758 info->fix.smem_len = info->fix.line_length * info->var.yres_virtual; 760 info->screen_base = fsl_diu_alloc(smem_len, &phys);
759 pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
760 info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
761 if (info->screen_base == NULL) { 761 if (info->screen_base == NULL) {
762 printk(KERN_ERR "Unable to allocate fb memory\n"); 762 printk(KERN_ERR "Unable to allocate fb memory\n");
763 return -ENOMEM; 763 return -ENOMEM;
764 } 764 }
765 mutex_lock(&info->mm_lock);
765 info->fix.smem_start = (unsigned long) phys; 766 info->fix.smem_start = (unsigned long) phys;
767 info->fix.smem_len = smem_len;
768 mutex_unlock(&info->mm_lock);
766 info->screen_size = info->fix.smem_len; 769 info->screen_size = info->fix.smem_len;
767 770
768 pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n", 771 pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
769 info->fix.smem_start, 772 info->fix.smem_start, info->fix.smem_len);
770 info->fix.smem_len);
771 pr_debug("screen base %p\n", info->screen_base); 773 pr_debug("screen base %p\n", info->screen_base);
772 774
773 return 0; 775 return 0;
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info)
776static void unmap_video_memory(struct fb_info *info) 778static void unmap_video_memory(struct fb_info *info)
777{ 779{
778 fsl_diu_free(info->screen_base, info->fix.smem_len); 780 fsl_diu_free(info->screen_base, info->fix.smem_len);
781 mutex_lock(&info->mm_lock);
779 info->screen_base = NULL; 782 info->screen_base = NULL;
780 info->fix.smem_start = 0; 783 info->fix.smem_start = 0;
781 info->fix.smem_len = 0; 784 info->fix.smem_len = 0;
785 mutex_unlock(&info->mm_lock);
782} 786}
783 787
784/* 788/*
@@ -1219,12 +1223,6 @@ static int __devinit install_fb(struct fb_info *info)
1219 return -EINVAL; 1223 return -EINVAL;
1220 } 1224 }
1221 1225
1222 if (fsl_diu_set_par(info)) {
1223 printk(KERN_ERR "fb_set_par failed");
1224 fb_dealloc_cmap(&info->cmap);
1225 return -EINVAL;
1226 }
1227
1228 if (register_framebuffer(info) < 0) { 1226 if (register_framebuffer(info) < 0) {
1229 printk(KERN_ERR "register_framebuffer failed"); 1227 printk(KERN_ERR "register_framebuffer failed");
1230 unmap_video_memory(info); 1228 unmap_video_memory(info);
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index 020db7fc9153..e7116a6d82d3 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -44,9 +44,6 @@ static struct fb_fix_screeninfo hitfb_fix __initdata = {
44 .accel = FB_ACCEL_NONE, 44 .accel = FB_ACCEL_NONE,
45}; 45};
46 46
47static u32 pseudo_palette[16];
48static struct fb_info fb_info;
49
50static inline void hitfb_accel_wait(void) 47static inline void hitfb_accel_wait(void)
51{ 48{
52 while (fb_readw(HD64461_GRCFGR) & HD64461_GRCFGR_ACCSTATUS) ; 49 while (fb_readw(HD64461_GRCFGR) & HD64461_GRCFGR_ACCSTATUS) ;
@@ -331,6 +328,8 @@ static struct fb_ops hitfb_ops = {
331static int __init hitfb_probe(struct platform_device *dev) 328static int __init hitfb_probe(struct platform_device *dev)
332{ 329{
333 unsigned short lcdclor, ldr3, ldvndr; 330 unsigned short lcdclor, ldr3, ldvndr;
331 struct fb_info *info;
332 int ret;
334 333
335 if (fb_get_options("hitfb", NULL)) 334 if (fb_get_options("hitfb", NULL))
336 return -ENODEV; 335 return -ENODEV;
@@ -384,32 +383,53 @@ static int __init hitfb_probe(struct platform_device *dev)
384 break; 383 break;
385 } 384 }
386 385
387 fb_info.fbops = &hitfb_ops; 386 info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
388 fb_info.var = hitfb_var; 387 if (unlikely(!info))
389 fb_info.fix = hitfb_fix; 388 return -ENOMEM;
390 fb_info.pseudo_palette = pseudo_palette; 389
391 fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN | 390 info->fbops = &hitfb_ops;
391 info->var = hitfb_var;
392 info->fix = hitfb_fix;
393 info->pseudo_palette = info->par;
394 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
392 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA; 395 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
393 396
394 fb_info.screen_base = (void *)hitfb_fix.smem_start; 397 info->screen_base = (void *)hitfb_fix.smem_start;
395 398
396 fb_alloc_cmap(&fb_info.cmap, 256, 0); 399 ret = fb_alloc_cmap(&info->cmap, 256, 0);
400 if (unlikely(ret < 0))
401 goto err_fb;
397 402
398 if (register_framebuffer(&fb_info) < 0) 403 ret = register_framebuffer(info);
399 return -EINVAL; 404 if (unlikely(ret < 0))
405 goto err;
406
407 platform_set_drvdata(dev, info);
400 408
401 printk(KERN_INFO "fb%d: %s frame buffer device\n", 409 printk(KERN_INFO "fb%d: %s frame buffer device\n",
402 fb_info.node, fb_info.fix.id); 410 info->node, info->fix.id);
411
403 return 0; 412 return 0;
413
414err:
415 fb_dealloc_cmap(&info->cmap);
416err_fb:
417 framebuffer_release(info);
418 return ret;
404} 419}
405 420
406static int __exit hitfb_remove(struct platform_device *dev) 421static int __exit hitfb_remove(struct platform_device *dev)
407{ 422{
408 return unregister_framebuffer(&fb_info); 423 struct fb_info *info = platform_get_drvdata(dev);
424
425 unregister_framebuffer(info);
426 fb_dealloc_cmap(&info->cmap);
427 framebuffer_release(info);
428
429 return 0;
409} 430}
410 431
411#ifdef CONFIG_PM 432static int hitfb_suspend(struct device *dev)
412static int hitfb_suspend(struct platform_device *dev, pm_message_t state)
413{ 433{
414 u16 v; 434 u16 v;
415 435
@@ -421,7 +441,7 @@ static int hitfb_suspend(struct platform_device *dev, pm_message_t state)
421 return 0; 441 return 0;
422} 442}
423 443
424static int hitfb_resume(struct platform_device *dev) 444static int hitfb_resume(struct device *dev)
425{ 445{
426 u16 v; 446 u16 v;
427 447
@@ -435,17 +455,19 @@ static int hitfb_resume(struct platform_device *dev)
435 455
436 return 0; 456 return 0;
437} 457}
438#endif 458
459static struct dev_pm_ops hitfb_dev_pm_ops = {
460 .suspend = hitfb_suspend,
461 .resume = hitfb_resume,
462};
439 463
440static struct platform_driver hitfb_driver = { 464static struct platform_driver hitfb_driver = {
441 .probe = hitfb_probe, 465 .probe = hitfb_probe,
442 .remove = __exit_p(hitfb_remove), 466 .remove = __exit_p(hitfb_remove),
443#ifdef CONFIG_PM
444 .suspend = hitfb_suspend,
445 .resume = hitfb_resume,
446#endif
447 .driver = { 467 .driver = {
448 .name = "hitfb", 468 .name = "hitfb",
469 .owner = THIS_MODULE,
470 .pm = &hitfb_dev_pm_ops,
449 }, 471 },
450}; 472};
451 473
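hitfb moves from a single static fb_info (and a static pseudo palette) to framebuffer_alloc()/framebuffer_release() with the palette carried in info->par, stores the pointer via platform_set_drvdata(), and swaps the legacy platform suspend/resume hooks for a dev_pm_ops table. The overall shape, as a hedged sketch with the hardware setup and module init/exit boilerplate omitted (examplefb_* names are placeholders):

#include <linux/module.h>
#include <linux/fb.h>
#include <linux/platform_device.h>

static struct fb_ops examplefb_ops = {
        .owner = THIS_MODULE,
        /* ->fb_* hooks omitted for brevity */
};

static int examplefb_probe(struct platform_device *pdev)
{
        struct fb_info *info;
        int ret;

        /* a 16-entry pseudo palette lives in the per-device area */
        info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
        if (!info)
                return -ENOMEM;

        info->fbops = &examplefb_ops;
        info->pseudo_palette = info->par;
        /* ->var, ->fix and screen_base setup omitted */

        ret = register_framebuffer(info);
        if (ret < 0) {
                framebuffer_release(info);
                return ret;
        }

        platform_set_drvdata(pdev, info);
        return 0;
}

static int examplefb_remove(struct platform_device *pdev)
{
        struct fb_info *info = platform_get_drvdata(pdev);

        unregister_framebuffer(info);
        framebuffer_release(info);
        return 0;
}

static int examplefb_suspend(struct device *dev) { return 0; }
static int examplefb_resume(struct device *dev)  { return 0; }

static struct dev_pm_ops examplefb_pm_ops = {
        .suspend = examplefb_suspend,
        .resume  = examplefb_resume,
};

static struct platform_driver examplefb_driver = {
        .probe  = examplefb_probe,
        .remove = examplefb_remove,
        .driver = {
                .name  = "examplefb",
                .owner = THIS_MODULE,
                .pm    = &examplefb_pm_ops,
        },
};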
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 2e940199fc89..5743ea25e818 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
1090 memset(fix, 0, sizeof(struct fb_fix_screeninfo)); 1090 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
1091 1091
1092 strcpy(fix->id, "I810"); 1092 strcpy(fix->id, "I810");
1093 mutex_lock(&info->mm_lock);
1093 fix->smem_start = par->fb.physical; 1094 fix->smem_start = par->fb.physical;
1094 fix->smem_len = par->fb.size; 1095 fix->smem_len = par->fb.size;
1096 mutex_unlock(&info->mm_lock);
1095 fix->type = FB_TYPE_PACKED_PIXELS; 1097 fix->type = FB_TYPE_PACKED_PIXELS;
1096 fix->type_aux = 0; 1098 fix->type_aux = 0;
1097 fix->xpanstep = 8; 1099 fix->xpanstep = 8;
@@ -2058,8 +2060,7 @@ static int __devinit i810fb_init_pci (struct pci_dev *dev,
2058 2060
2059 fb_var_to_videomode(&mode, &info->var); 2061 fb_var_to_videomode(&mode, &info->var);
2060 fb_add_videomode(&mode, &info->modelist); 2062 fb_add_videomode(&mode, &info->modelist);
2061 encode_fix(&info->fix, info); 2063
2062
2063 i810fb_init_ringbuffer(info); 2064 i810fb_init_ringbuffer(info);
2064 err = register_framebuffer(info); 2065 err = register_framebuffer(info);
2065 2066
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index 0ce3b0a89798..a74e5da17aa0 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -454,9 +454,9 @@ static void DAC1064_restore_2(WPMINFO2) {
454 dprintk(KERN_DEBUG "DAC1064regs "); 454 dprintk(KERN_DEBUG "DAC1064regs ");
455 for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) { 455 for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) {
456 dprintk("R%02X=%02X ", MGA1064_DAC_regs[i], ACCESS_FBINFO(hw).DACreg[i]); 456 dprintk("R%02X=%02X ", MGA1064_DAC_regs[i], ACCESS_FBINFO(hw).DACreg[i]);
457 if ((i & 0x7) == 0x7) dprintk("\n" KERN_DEBUG "continuing... "); 457 if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... ");
458 } 458 }
459 dprintk("\n" KERN_DEBUG "DAC1064clk "); 459 dprintk(KERN_DEBUG "DAC1064clk ");
460 for (i = 0; i < 6; i++) 460 for (i = 0; i < 6; i++)
461 dprintk("C%02X=%02X ", i, ACCESS_FBINFO(hw).DACclk[i]); 461 dprintk("C%02X=%02X ", i, ACCESS_FBINFO(hw).DACclk[i]);
462 dprintk("\n"); 462 dprintk("\n");
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index 13524821e242..4e825112a601 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -651,9 +651,9 @@ static void Ti3026_restore(WPMINFO2) {
651 dprintk(KERN_DEBUG "3026DACregs "); 651 dprintk(KERN_DEBUG "3026DACregs ");
652 for (i = 0; i < 21; i++) { 652 for (i = 0; i < 21; i++) {
653 dprintk("R%02X=%02X ", DACseq[i], hw->DACreg[i]); 653 dprintk("R%02X=%02X ", DACseq[i], hw->DACreg[i]);
654 if ((i & 0x7) == 0x7) dprintk("\n" KERN_DEBUG "continuing... "); 654 if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... ");
655 } 655 }
656 dprintk("\n" KERN_DEBUG "DACclk "); 656 dprintk(KERN_DEBUG "DACclk ");
657 for (i = 0; i < 6; i++) 657 for (i = 0; i < 6; i++)
658 dprintk("C%02X=%02X ", i, hw->DACclk[i]); 658 dprintk("C%02X=%02X ", i, hw->DACclk[i]);
659 dprintk("\n"); 659 dprintk("\n");
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 8e7a275df50c..0c1049b308bf 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2)
724 struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix; 724 struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
725 DBG(__func__) 725 DBG(__func__)
726 726
727 mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
727 fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes); 728 fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
728 fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes); 729 fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
730 mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
729} 731}
730 732
731static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 733static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -1874,7 +1876,6 @@ static int initMatrox2(WPMINFO struct board* b){
1874 } 1876 }
1875 matroxfb_init_fix(PMINFO2); 1877 matroxfb_init_fix(PMINFO2);
1876 ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase)); 1878 ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase));
1877 matroxfb_update_fix(PMINFO2);
1878 /* Normalize values (namely yres_virtual) */ 1879 /* Normalize values (namely yres_virtual) */
1879 matroxfb_check_var(&vesafb_defined, &ACCESS_FBINFO(fbcon)); 1880 matroxfb_check_var(&vesafb_defined, &ACCESS_FBINFO(fbcon));
1880 /* And put it into "current" var. Do NOT program hardware yet, or we'll not take over 1881 /* And put it into "current" var. Do NOT program hardware yet, or we'll not take over
@@ -2081,6 +2082,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
2081 spin_lock_init(&ACCESS_FBINFO(lock.accel)); 2082 spin_lock_init(&ACCESS_FBINFO(lock.accel));
2082 init_rwsem(&ACCESS_FBINFO(crtc2.lock)); 2083 init_rwsem(&ACCESS_FBINFO(crtc2.lock));
2083 init_rwsem(&ACCESS_FBINFO(altout.lock)); 2084 init_rwsem(&ACCESS_FBINFO(altout.lock));
2085 mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
2084 ACCESS_FBINFO(irq_flags) = 0; 2086 ACCESS_FBINFO(irq_flags) = 0;
2085 init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait)); 2087 init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
2086 init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait)); 2088 init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 7ac4c5f6145d..ebcb5c6b4962 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -289,7 +289,12 @@ static int matroxfb_dh_release(struct fb_info* info, int user) {
289#undef m2info 289#undef m2info
290} 290}
291 291
292static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) { 292/*
293 * This function is called before the register_framebuffer so
294 * no locking is needed.
295 */
296static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info)
297{
293 struct fb_fix_screeninfo *fix = &m2info->fbcon.fix; 298 struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;
294 299
295 strcpy(fix->id, "MATROX DH"); 300 strcpy(fix->id, "MATROX DH");
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index b7af5256e887..054ef29be479 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -669,7 +669,8 @@ static uint32_t bpp_to_pixfmt(int bpp)
669} 669}
670 670
671static int mx3fb_blank(int blank, struct fb_info *fbi); 671static int mx3fb_blank(int blank, struct fb_info *fbi);
672static int mx3fb_map_video_memory(struct fb_info *fbi); 672static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
673 bool lock);
673static int mx3fb_unmap_video_memory(struct fb_info *fbi); 674static int mx3fb_unmap_video_memory(struct fb_info *fbi);
674 675
675/** 676/**
@@ -711,12 +712,7 @@ static void mx3fb_dma_done(void *arg)
711 complete(&mx3_fbi->flip_cmpl); 712 complete(&mx3_fbi->flip_cmpl);
712} 713}
713 714
714/** 715static int __set_par(struct fb_info *fbi, bool lock)
715 * mx3fb_set_par() - set framebuffer parameters and change the operating mode.
716 * @fbi: framebuffer information pointer.
717 * @return: 0 on success or negative error code on failure.
718 */
719static int mx3fb_set_par(struct fb_info *fbi)
720{ 716{
721 u32 mem_len; 717 u32 mem_len;
722 struct ipu_di_signal_cfg sig_cfg; 718 struct ipu_di_signal_cfg sig_cfg;
@@ -727,10 +723,6 @@ static int mx3fb_set_par(struct fb_info *fbi)
727 struct idmac_video_param *video = &ichan->params.video; 723 struct idmac_video_param *video = &ichan->params.video;
728 struct scatterlist *sg = mx3_fbi->sg; 724 struct scatterlist *sg = mx3_fbi->sg;
729 725
730 dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+');
731
732 mutex_lock(&mx3_fbi->mutex);
733
734 /* Total cleanup */ 726 /* Total cleanup */
735 if (mx3_fbi->txd) 727 if (mx3_fbi->txd)
736 sdc_disable_channel(mx3_fbi); 728 sdc_disable_channel(mx3_fbi);
@@ -742,11 +734,8 @@ static int mx3fb_set_par(struct fb_info *fbi)
742 if (fbi->fix.smem_start) 734 if (fbi->fix.smem_start)
743 mx3fb_unmap_video_memory(fbi); 735 mx3fb_unmap_video_memory(fbi);
744 736
745 fbi->fix.smem_len = mem_len; 737 if (mx3fb_map_video_memory(fbi, mem_len, lock) < 0)
746 if (mx3fb_map_video_memory(fbi) < 0) {
747 mutex_unlock(&mx3_fbi->mutex);
748 return -ENOMEM; 738 return -ENOMEM;
749 }
750 } 739 }
751 740
752 sg_init_table(&sg[0], 1); 741 sg_init_table(&sg[0], 1);
@@ -792,7 +781,6 @@ static int mx3fb_set_par(struct fb_info *fbi)
792 fbi->var.vsync_len, 781 fbi->var.vsync_len,
793 fbi->var.lower_margin + 782 fbi->var.lower_margin +
794 fbi->var.vsync_len, sig_cfg) != 0) { 783 fbi->var.vsync_len, sig_cfg) != 0) {
795 mutex_unlock(&mx3_fbi->mutex);
796 dev_err(fbi->device, 784 dev_err(fbi->device,
797 "mx3fb: Error initializing panel.\n"); 785 "mx3fb: Error initializing panel.\n");
798 return -EINVAL; 786 return -EINVAL;
@@ -811,9 +799,30 @@ static int mx3fb_set_par(struct fb_info *fbi)
811 if (mx3_fbi->blank == FB_BLANK_UNBLANK) 799 if (mx3_fbi->blank == FB_BLANK_UNBLANK)
812 sdc_enable_channel(mx3_fbi); 800 sdc_enable_channel(mx3_fbi);
813 801
802 return 0;
803}
804
805/**
806 * mx3fb_set_par() - set framebuffer parameters and change the operating mode.
807 * @fbi: framebuffer information pointer.
808 * @return: 0 on success or negative error code on failure.
809 */
810static int mx3fb_set_par(struct fb_info *fbi)
811{
812 struct mx3fb_info *mx3_fbi = fbi->par;
813 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
814 struct idmac_channel *ichan = mx3_fbi->idmac_channel;
815 int ret;
816
817 dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+');
818
819 mutex_lock(&mx3_fbi->mutex);
820
821 ret = __set_par(fbi, true);
822
814 mutex_unlock(&mx3_fbi->mutex); 823 mutex_unlock(&mx3_fbi->mutex);
815 824
816 return 0; 825 return ret;
817} 826}
818 827
819/** 828/**
@@ -967,21 +976,11 @@ static int mx3fb_setcolreg(unsigned int regno, unsigned int red,
967 return ret; 976 return ret;
968} 977}
969 978
970/** 979static void __blank(int blank, struct fb_info *fbi)
971 * mx3fb_blank() - blank the display.
972 */
973static int mx3fb_blank(int blank, struct fb_info *fbi)
974{ 980{
975 struct mx3fb_info *mx3_fbi = fbi->par; 981 struct mx3fb_info *mx3_fbi = fbi->par;
976 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; 982 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
977 983
978 dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__,
979 blank, fbi->screen_base, fbi->fix.smem_len);
980
981 if (mx3_fbi->blank == blank)
982 return 0;
983
984 mutex_lock(&mx3_fbi->mutex);
985 mx3_fbi->blank = blank; 984 mx3_fbi->blank = blank;
986 985
987 switch (blank) { 986 switch (blank) {
@@ -1000,6 +999,23 @@ static int mx3fb_blank(int blank, struct fb_info *fbi)
1000 sdc_set_brightness(mx3fb, mx3fb->backlight_level); 999 sdc_set_brightness(mx3fb, mx3fb->backlight_level);
1001 break; 1000 break;
1002 } 1001 }
1002}
1003
1004/**
1005 * mx3fb_blank() - blank the display.
1006 */
1007static int mx3fb_blank(int blank, struct fb_info *fbi)
1008{
1009 struct mx3fb_info *mx3_fbi = fbi->par;
1010
1011 dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__,
1012 blank, fbi->screen_base, fbi->fix.smem_len);
1013
1014 if (mx3_fbi->blank == blank)
1015 return 0;
1016
1017 mutex_lock(&mx3_fbi->mutex);
1018 __blank(blank, fbi);
1003 mutex_unlock(&mx3_fbi->mutex); 1019 mutex_unlock(&mx3_fbi->mutex);
1004 1020
1005 return 0; 1021 return 0;
@@ -1198,6 +1214,8 @@ static int mx3fb_resume(struct platform_device *pdev)
1198/** 1214/**
1199 * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. 1215 * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
1200 * @fbi: framebuffer information pointer 1216 * @fbi: framebuffer information pointer
1217 * @mem_len: length of mapped memory
1218 * @lock: do not lock during initialisation
1201 * @return: Error code indicating success or failure 1219 * @return: Error code indicating success or failure
1202 * 1220 *
1203 * This buffer is remapped into a non-cached, non-buffered, memory region to 1221 * This buffer is remapped into a non-cached, non-buffered, memory region to
@@ -1205,23 +1223,29 @@ static int mx3fb_resume(struct platform_device *pdev)
1205 * area is remapped, all virtual memory access to the video memory should occur 1223 * area is remapped, all virtual memory access to the video memory should occur
1206 * at the new region. 1224 * at the new region.
1207 */ 1225 */
1208static int mx3fb_map_video_memory(struct fb_info *fbi) 1226static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
1227 bool lock)
1209{ 1228{
1210 int retval = 0; 1229 int retval = 0;
1211 dma_addr_t addr; 1230 dma_addr_t addr;
1212 1231
1213 fbi->screen_base = dma_alloc_writecombine(fbi->device, 1232 fbi->screen_base = dma_alloc_writecombine(fbi->device,
1214 fbi->fix.smem_len, 1233 mem_len,
1215 &addr, GFP_DMA); 1234 &addr, GFP_DMA);
1216 1235
1217 if (!fbi->screen_base) { 1236 if (!fbi->screen_base) {
1218 dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", 1237 dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
1219 fbi->fix.smem_len); 1238 mem_len);
1220 retval = -EBUSY; 1239 retval = -EBUSY;
1221 goto err0; 1240 goto err0;
1222 } 1241 }
1223 1242
1243 if (lock)
1244 mutex_lock(&fbi->mm_lock);
1224 fbi->fix.smem_start = addr; 1245 fbi->fix.smem_start = addr;
1246 fbi->fix.smem_len = mem_len;
1247 if (lock)
1248 mutex_unlock(&fbi->mm_lock);
1225 1249
1226 dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", 1250 dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
1227 (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); 1251 (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
@@ -1251,8 +1275,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
1251 fbi->screen_base, fbi->fix.smem_start); 1275 fbi->screen_base, fbi->fix.smem_start);
1252 1276
1253 fbi->screen_base = 0; 1277 fbi->screen_base = 0;
1278 mutex_lock(&fbi->mm_lock);
1254 fbi->fix.smem_start = 0; 1279 fbi->fix.smem_start = 0;
1255 fbi->fix.smem_len = 0; 1280 fbi->fix.smem_len = 0;
1281 mutex_unlock(&fbi->mm_lock);
1256 return 0; 1282 return 0;
1257} 1283}
1258 1284
@@ -1360,11 +1386,11 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
1360 init_completion(&mx3fbi->flip_cmpl); 1386 init_completion(&mx3fbi->flip_cmpl);
1361 disable_irq(ichan->eof_irq); 1387 disable_irq(ichan->eof_irq);
1362 dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); 1388 dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
1363 ret = mx3fb_set_par(fbi); 1389 ret = __set_par(fbi, false);
1364 if (ret < 0) 1390 if (ret < 0)
1365 goto esetpar; 1391 goto esetpar;
1366 1392
1367 mx3fb_blank(FB_BLANK_UNBLANK, fbi); 1393 __blank(FB_BLANK_UNBLANK, fbi);
1368 1394
1369 dev_info(dev, "registered, using mode %s\n", fb_mode); 1395 dev_info(dev, "registered, using mode %s\n", fb_mode);
1370 1396
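The mx3fb rework follows a common pattern: mx3fb_set_par() and mx3fb_blank() become thin locked wrappers around unlocked __set_par()/__blank() workers, so init_fb_chan() can drive the hardware before register_framebuffer() has run (passing lock = false down into mx3fb_map_video_memory(), presumably because the mm_lock it would take is only initialised at registration time). A generic sketch of the wrapper/worker split, with all names as placeholders:

#include <linux/fb.h>
#include <linux/mutex.h>

struct example_par {
        struct mutex mutex;
        /* ... device state ... */
};

/* Worker: reprograms the controller; the caller decides about locking. */
static int __example_set_par(struct fb_info *fbi)
{
        /* ... hardware programming elided ... */
        return 0;
}

/* fb_ops entry point: always takes the lock around the worker. */
static int example_set_par(struct fb_info *fbi)
{
        struct example_par *par = fbi->par;
        int ret;

        mutex_lock(&par->mutex);
        ret = __example_set_par(fbi);
        mutex_unlock(&par->mutex);

        return ret;
}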
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 060d72fe57cb..8862233d57b6 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi)
393 393
394 rg = &plane->fbdev->mem_desc.region[plane->idx]; 394 rg = &plane->fbdev->mem_desc.region[plane->idx];
395 fbi->screen_base = rg->vaddr; 395 fbi->screen_base = rg->vaddr;
396 mutex_lock(&fbi->mm_lock);
396 fix->smem_start = rg->paddr; 397 fix->smem_start = rg->paddr;
397 fix->smem_len = rg->size; 398 fix->smem_len = rg->size;
399 mutex_unlock(&fbi->mm_lock);
398 400
399 fix->type = FB_TYPE_PACKED_PIXELS; 401 fix->type = FB_TYPE_PACKED_PIXELS;
400 bpp = var->bits_per_pixel; 402 bpp = var->bits_per_pixel;
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
886 * plane memory is dealloce'd, the other 888 * plane memory is dealloce'd, the other
887 * screen parameters in var / fix are invalid. 889 * screen parameters in var / fix are invalid.
888 */ 890 */
891 mutex_lock(&fbi->mm_lock);
889 fbi->fix.smem_start = 0; 892 fbi->fix.smem_start = 0;
890 fbi->fix.smem_len = 0; 893 fbi->fix.smem_len = 0;
894 mutex_unlock(&fbi->mm_lock);
891 } 895 }
892 } 896 }
893 } 897 }
@@ -1250,7 +1254,7 @@ static struct fb_ops omapfb_ops = {
1250static ssize_t omapfb_show_caps_num(struct device *dev, 1254static ssize_t omapfb_show_caps_num(struct device *dev,
1251 struct device_attribute *attr, char *buf) 1255 struct device_attribute *attr, char *buf)
1252{ 1256{
1253 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; 1257 struct omapfb_device *fbdev = dev_get_drvdata(dev);
1254 int plane; 1258 int plane;
1255 size_t size; 1259 size_t size;
1256 struct omapfb_caps caps; 1260 struct omapfb_caps caps;
@@ -1270,7 +1274,7 @@ static ssize_t omapfb_show_caps_num(struct device *dev,
1270static ssize_t omapfb_show_caps_text(struct device *dev, 1274static ssize_t omapfb_show_caps_text(struct device *dev,
1271 struct device_attribute *attr, char *buf) 1275 struct device_attribute *attr, char *buf)
1272{ 1276{
1273 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; 1277 struct omapfb_device *fbdev = dev_get_drvdata(dev);
1274 int i; 1278 int i;
1275 struct omapfb_caps caps; 1279 struct omapfb_caps caps;
1276 int plane; 1280 int plane;
@@ -1317,7 +1321,7 @@ static DEVICE_ATTR(caps_text, 0444, omapfb_show_caps_text, NULL);
1317static ssize_t omapfb_show_panel_name(struct device *dev, 1321static ssize_t omapfb_show_panel_name(struct device *dev,
1318 struct device_attribute *attr, char *buf) 1322 struct device_attribute *attr, char *buf)
1319{ 1323{
1320 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; 1324 struct omapfb_device *fbdev = dev_get_drvdata(dev);
1321 1325
1322 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name); 1326 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
1323} 1327}
@@ -1326,7 +1330,7 @@ static ssize_t omapfb_show_bklight_level(struct device *dev,
1326 struct device_attribute *attr, 1330 struct device_attribute *attr,
1327 char *buf) 1331 char *buf)
1328{ 1332{
1329 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; 1333 struct omapfb_device *fbdev = dev_get_drvdata(dev);
1330 int r; 1334 int r;
1331 1335
1332 if (fbdev->panel->get_bklight_level) { 1336 if (fbdev->panel->get_bklight_level) {
@@ -1341,7 +1345,7 @@ static ssize_t omapfb_store_bklight_level(struct device *dev,
1341 struct device_attribute *attr, 1345 struct device_attribute *attr,
1342 const char *buf, size_t size) 1346 const char *buf, size_t size)
1343{ 1347{
1344 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; 1348 struct omapfb_device *fbdev = dev_get_drvdata(dev);
1345 int r; 1349 int r;
1346 1350
1347 if (fbdev->panel->set_bklight_level) { 1351 if (fbdev->panel->set_bklight_level) {
@@ -1360,7 +1364,7 @@ static ssize_t omapfb_store_bklight_level(struct device *dev,
1360static ssize_t omapfb_show_bklight_max(struct device *dev, 1364static ssize_t omapfb_show_bklight_max(struct device *dev,
1361 struct device_attribute *attr, char *buf) 1365 struct device_attribute *attr, char *buf)
1362{ 1366{
1363 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; 1367 struct omapfb_device *fbdev = dev_get_drvdata(dev);
1364 int r; 1368 int r;
1365 1369
1366 if (fbdev->panel->get_bklight_level) { 1370 if (fbdev->panel->get_bklight_level) {
@@ -1393,7 +1397,7 @@ static struct attribute_group panel_attr_grp = {
1393static ssize_t omapfb_show_ctrl_name(struct device *dev, 1397static ssize_t omapfb_show_ctrl_name(struct device *dev,
1394 struct device_attribute *attr, char *buf) 1398 struct device_attribute *attr, char *buf)
1395{ 1399{
1396 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; 1400 struct omapfb_device *fbdev = dev_get_drvdata(dev);
1397 1401
1398 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name); 1402 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
1399} 1403}
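The omapfb sysfs handlers stop dereferencing dev->driver_data directly and go through dev_get_drvdata(), the conventional accessor for per-device driver data. One of the show handlers, reduced to a sketch that mirrors the hunk (nothing new beyond the accessor):

static ssize_t omapfb_show_panel_name(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct omapfb_device *fbdev = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
}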
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 03b3670130a0..bacfabd9ce16 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info)
141 offset = 0x10; 141 offset = 0x10;
142 142
143 info->screen_base = pinfo->frame_buffer + init->fb_offset + offset; 143 info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
144 mutex_lock(&info->mm_lock);
144 info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset; 145 info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
146 mutex_unlock(&info->mm_lock);
145 info->fix.visual = (pinfo->cmode == CMODE_8) ? 147 info->fix.visual = (pinfo->cmode == CMODE_8) ?
146 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; 148 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
147 info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode) 149 info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0889d50c3288..6506117c134b 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
815 ofb->video_mem_phys = virt_to_phys(ofb->video_mem); 815 ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
816 ofb->video_mem_size = size; 816 ofb->video_mem_size = size;
817 817
818 mutex_lock(&ofb->fb.mm_lock);
818 ofb->fb.fix.smem_start = ofb->video_mem_phys; 819 ofb->fb.fix.smem_start = ofb->video_mem_phys;
819 ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual; 820 ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
821 mutex_unlock(&ofb->fb.mm_lock);
820 ofb->fb.screen_base = ofb->video_mem; 822 ofb->fb.screen_base = ofb->video_mem;
821 return 0; 823 return 0;
822} 824}
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 43680e545427..5a72083dc67c 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -211,23 +211,21 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
211 211
212/** 212/**
213 * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock. 213 * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
214 * @id: window id.
214 * @sfb: The hardware state. 215 * @sfb: The hardware state.
215 * @pixclock: The pixel clock wanted, in picoseconds. 216 * @pixclock: The pixel clock wanted, in picoseconds.
216 * 217 *
217 * Given the specified pixel clock, work out the necessary divider to get 218 * Given the specified pixel clock, work out the necessary divider to get
218 * close to the output frequency. 219 * close to the output frequency.
219 */ 220 */
220static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) 221static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk)
221{ 222{
223 struct s3c_fb_pd_win *win = sfb->pdata->win[id];
222 unsigned long clk = clk_get_rate(sfb->bus_clk); 224 unsigned long clk = clk_get_rate(sfb->bus_clk);
223 unsigned long long tmp;
224 unsigned int result; 225 unsigned int result;
225 226
226 tmp = (unsigned long long)clk; 227 pixclk *= win->win_mode.refresh;
227 tmp *= pixclk; 228 result = clk / pixclk;
228
229 do_div(tmp, 1000000000UL);
230 result = (unsigned int)tmp / 1000;
231 229
232 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", 230 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
233 pixclk, clk, result, clk / result); 231 pixclk, clk, result, clk / result);
@@ -267,6 +265,7 @@ static int s3c_fb_set_par(struct fb_info *info)
267 struct s3c_fb *sfb = win->parent; 265 struct s3c_fb *sfb = win->parent;
268 void __iomem *regs = sfb->regs; 266 void __iomem *regs = sfb->regs;
269 int win_no = win->index; 267 int win_no = win->index;
268 u32 osdc_data = 0;
270 u32 data; 269 u32 data;
271 u32 pagewidth; 270 u32 pagewidth;
272 int clkdiv; 271 int clkdiv;
@@ -302,7 +301,7 @@ static int s3c_fb_set_par(struct fb_info *info)
302 /* use window 0 as the basis for the lcd output timings */ 301 /* use window 0 as the basis for the lcd output timings */
303 302
304 if (win_no == 0) { 303 if (win_no == 0) {
305 clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock); 304 clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock);
306 305
307 data = sfb->pdata->vidcon0; 306 data = sfb->pdata->vidcon0;
308 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); 307 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
@@ -359,8 +358,6 @@ static int s3c_fb_set_par(struct fb_info *info)
359 358
360 data = var->xres * var->yres; 359 data = var->xres * var->yres;
361 360
362 u32 osdc_data = 0;
363
364 osdc_data = VIDISD14C_ALPHA1_R(0xf) | 361 osdc_data = VIDISD14C_ALPHA1_R(0xf) |
365 VIDISD14C_ALPHA1_G(0xf) | 362 VIDISD14C_ALPHA1_G(0xf) |
366 VIDISD14C_ALPHA1_B(0xf); 363 VIDISD14C_ALPHA1_B(0xf);
@@ -967,7 +964,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
967 struct s3c_fb *sfb = platform_get_drvdata(pdev); 964 struct s3c_fb *sfb = platform_get_drvdata(pdev);
968 int win; 965 int win;
969 966
970 for (win = 0; win <= S3C_FB_MAX_WIN; win++) 967 for (win = 0; win < S3C_FB_MAX_WIN; win++)
971 if (sfb->windows[win]) 968 if (sfb->windows[win])
972 s3c_fb_release_win(sfb, sfb->windows[win]); 969 s3c_fb_release_win(sfb, sfb->windows[win]);
973 970
@@ -991,7 +988,7 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state)
991 struct s3c_fb_win *win; 988 struct s3c_fb_win *win;
992 int win_no; 989 int win_no;
993 990
994 for (win_no = S3C_FB_MAX_WIN; win_no >= 0; win_no--) { 991 for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
995 win = sfb->windows[win_no]; 992 win = sfb->windows[win_no];
996 if (!win) 993 if (!win)
997 continue; 994 continue;
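The s3c-fb hunk above replaces the 64-bit picosecond arithmetic in s3c_fb_calc_pixclk() with a divider derived from the window's refresh rate. The standalone program below only mirrors the arithmetic of the two versions: the clock and timing values are invented for illustration, no framebuffer or clk API is used, and it makes no claim that the two formulas yield the same divider (they plainly interpret the pixclock argument differently).

#include <stdio.h>

int main(void)
{
	unsigned long bus_clk = 133000000UL;	/* assumed bus clock, Hz */
	unsigned int pixclock_ps = 38461;	/* var->pixclock for roughly 26 MHz */
	unsigned int refresh = 60;		/* win_mode.refresh */

	/* old scheme: divider from the requested period in picoseconds */
	unsigned long long tmp = (unsigned long long)bus_clk * pixclock_ps;
	unsigned int div_old = (unsigned int)(tmp / 1000000000ULL) / 1000;

	/* new scheme: pixclk is scaled by the refresh rate first */
	unsigned int pixclk = pixclock_ps * refresh;
	unsigned int div_new = bus_clk / pixclk;

	printf("old divider %u, new divider %u\n", div_old, div_new);
	return 0;
}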
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 653bdfee3057..9f6d6e61f0cc 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno,
120 return 0; 120 return 0;
121} 121}
122 122
123static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
124 unsigned long stride)
125{
126 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
127 strcpy(fix->id, "sh7760-lcdc");
128
129 fix->smem_start = (unsigned long)info->screen_base;
130 fix->smem_len = info->screen_size;
131
132 fix->line_length = stride;
133}
134
135static int sh7760fb_get_color_info(struct device *dev, 123static int sh7760fb_get_color_info(struct device *dev,
136 u16 lddfr, int *bpp, int *gray) 124 u16 lddfr, int *bpp, int *gray)
137{ 125{
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info)
334 322
335 iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */ 323 iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
336 324
337 encode_fix(&info->fix, info, stride); 325 info->fix.line_length = stride;
326
338 sh7760fb_check_var(&info->var, info); 327 sh7760fb_check_var(&info->var, info);
339 328
340 sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */ 329 sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
435 424
436 info->screen_base = fbmem; 425 info->screen_base = fbmem;
437 info->screen_size = vram; 426 info->screen_size = vram;
427 info->fix.smem_start = (unsigned long)info->screen_base;
428 info->fix.smem_len = info->screen_size;
438 429
439 return 0; 430 return 0;
440} 431}
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
520 info->var.transp.length = 0; 511 info->var.transp.length = 0;
521 info->var.transp.msb_right = 0; 512 info->var.transp.msb_right = 0;
522 513
514 strcpy(info->fix.id, "sh7760-lcdc");
515
523 /* set the DON2 bit now, before cmap allocation, as it will randomize 516 /* set the DON2 bit now, before cmap allocation, as it will randomize
524 * palette memory. 517 * palette memory.
525 */ 518 */
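The sh7760fb change drops encode_fix() and fills each piece of the fixed screeninfo where it is actually known: the id string at probe time, the memory aperture when video memory is allocated, and only the line length in set_par. A compressed sketch of that split, with placeholder function names and the fb_info fields taken from the hunk above:

#include <linux/fb.h>

/* Sketch only: the helper names are placeholders, not driver functions. */
static int example_alloc_mem(struct fb_info *info, void *fbmem, size_t vram)
{
	info->screen_base = fbmem;
	info->screen_size = vram;
	/* the aperture does not change per mode, so record it once here */
	info->fix.smem_start = (unsigned long)info->screen_base;
	info->fix.smem_len   = info->screen_size;
	return 0;
}

static void example_set_par(struct fb_info *info, unsigned long stride)
{
	/* only the per-mode quantity is updated at mode-set time */
	info->fix.line_length = stride;
}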
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index f10d2fbeda06..8f24564f77b0 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -17,6 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/vmalloc.h>
20#include <video/sh_mobile_lcdc.h> 21#include <video/sh_mobile_lcdc.h>
21#include <asm/atomic.h> 22#include <asm/atomic.h>
22 23
@@ -30,9 +31,10 @@ struct sh_mobile_lcdc_chan {
30 unsigned long enabled; /* ME and SE in LDCNT2R */ 31 unsigned long enabled; /* ME and SE in LDCNT2R */
31 struct sh_mobile_lcdc_chan_cfg cfg; 32 struct sh_mobile_lcdc_chan_cfg cfg;
32 u32 pseudo_palette[PALETTE_NR]; 33 u32 pseudo_palette[PALETTE_NR];
33 struct fb_info info; 34 struct fb_info *info;
34 dma_addr_t dma_handle; 35 dma_addr_t dma_handle;
35 struct fb_deferred_io defio; 36 struct fb_deferred_io defio;
37 struct scatterlist *sglist;
36 unsigned long frame_end; 38 unsigned long frame_end;
37 wait_queue_head_t frame_end_wait; 39 wait_queue_head_t frame_end_wait;
38}; 40};
@@ -206,16 +208,38 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {}
206static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {} 208static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {}
207#endif 209#endif
208 210
211static int sh_mobile_lcdc_sginit(struct fb_info *info,
212 struct list_head *pagelist)
213{
214 struct sh_mobile_lcdc_chan *ch = info->par;
215 unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT;
216 struct page *page;
217 int nr_pages = 0;
218
219 sg_init_table(ch->sglist, nr_pages_max);
220
221 list_for_each_entry(page, pagelist, lru)
222 sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
223
224 return nr_pages;
225}
226
209static void sh_mobile_lcdc_deferred_io(struct fb_info *info, 227static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
210 struct list_head *pagelist) 228 struct list_head *pagelist)
211{ 229{
212 struct sh_mobile_lcdc_chan *ch = info->par; 230 struct sh_mobile_lcdc_chan *ch = info->par;
231 unsigned int nr_pages;
213 232
214 /* enable clocks before accessing hardware */ 233 /* enable clocks before accessing hardware */
215 sh_mobile_lcdc_clk_on(ch->lcdc); 234 sh_mobile_lcdc_clk_on(ch->lcdc);
216 235
236 nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
237 dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
238
217 /* trigger panel update */ 239 /* trigger panel update */
218 lcdc_write_chan(ch, LDSM2R, 1); 240 lcdc_write_chan(ch, LDSM2R, 1);
241
242 dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
219} 243}
220 244
221static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) 245static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
@@ -418,22 +442,22 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
418 /* set bpp format in PKF[4:0] */ 442 /* set bpp format in PKF[4:0] */
419 tmp = lcdc_read_chan(ch, LDDFR); 443 tmp = lcdc_read_chan(ch, LDDFR);
420 tmp &= ~(0x0001001f); 444 tmp &= ~(0x0001001f);
421 tmp |= (priv->ch[k].info.var.bits_per_pixel == 16) ? 3 : 0; 445 tmp |= (ch->info->var.bits_per_pixel == 16) ? 3 : 0;
422 lcdc_write_chan(ch, LDDFR, tmp); 446 lcdc_write_chan(ch, LDDFR, tmp);
423 447
424 /* point out our frame buffer */ 448 /* point out our frame buffer */
425 lcdc_write_chan(ch, LDSA1R, ch->info.fix.smem_start); 449 lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start);
426 450
427 /* set line size */ 451 /* set line size */
428 lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length); 452 lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length);
429 453
430 /* setup deferred io if SYS bus */ 454 /* setup deferred io if SYS bus */
431 tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; 455 tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
432 if (ch->ldmt1r_value & (1 << 12) && tmp) { 456 if (ch->ldmt1r_value & (1 << 12) && tmp) {
433 ch->defio.deferred_io = sh_mobile_lcdc_deferred_io; 457 ch->defio.deferred_io = sh_mobile_lcdc_deferred_io;
434 ch->defio.delay = msecs_to_jiffies(tmp); 458 ch->defio.delay = msecs_to_jiffies(tmp);
435 ch->info.fbdefio = &ch->defio; 459 ch->info->fbdefio = &ch->defio;
436 fb_deferred_io_init(&ch->info); 460 fb_deferred_io_init(ch->info);
437 461
438 /* one-shot mode */ 462 /* one-shot mode */
439 lcdc_write_chan(ch, LDSM1R, 1); 463 lcdc_write_chan(ch, LDSM1R, 1);
@@ -479,12 +503,12 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
479 * flush frame, and wait for frame end interrupt 503 * flush frame, and wait for frame end interrupt
480 * clean up deferred io and enable clock 504 * clean up deferred io and enable clock
481 */ 505 */
482 if (ch->info.fbdefio) { 506 if (ch->info->fbdefio) {
483 ch->frame_end = 0; 507 ch->frame_end = 0;
484 schedule_delayed_work(&ch->info.deferred_work, 0); 508 schedule_delayed_work(&ch->info->deferred_work, 0);
485 wait_event(ch->frame_end_wait, ch->frame_end); 509 wait_event(ch->frame_end_wait, ch->frame_end);
486 fb_deferred_io_cleanup(&ch->info); 510 fb_deferred_io_cleanup(ch->info);
487 ch->info.fbdefio = NULL; 511 ch->info->fbdefio = NULL;
488 sh_mobile_lcdc_clk_on(priv); 512 sh_mobile_lcdc_clk_on(priv);
489 } 513 }
490 514
@@ -793,9 +817,16 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
793 priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1); 817 priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1);
794 818
795 for (i = 0; i < j; i++) { 819 for (i = 0; i < j; i++) {
796 info = &priv->ch[i].info;
797 cfg = &priv->ch[i].cfg; 820 cfg = &priv->ch[i].cfg;
798 821
822 priv->ch[i].info = framebuffer_alloc(0, &pdev->dev);
823 if (!priv->ch[i].info) {
824 dev_err(&pdev->dev, "unable to allocate fb_info\n");
825 error = -ENOMEM;
826 break;
827 }
828
829 info = priv->ch[i].info;
799 info->fbops = &sh_mobile_lcdc_ops; 830 info->fbops = &sh_mobile_lcdc_ops;
800 info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres; 831 info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres;
801 info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres; 832 info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres;
@@ -846,21 +877,31 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
846 } 877 }
847 878
848 for (i = 0; i < j; i++) { 879 for (i = 0; i < j; i++) {
849 error = register_framebuffer(&priv->ch[i].info); 880 struct sh_mobile_lcdc_chan *ch = priv->ch + i;
881
882 info = ch->info;
883
884 if (info->fbdefio) {
885 priv->ch->sglist = vmalloc(sizeof(struct scatterlist) *
886 info->fix.smem_len >> PAGE_SHIFT);
887 if (!priv->ch->sglist) {
888 dev_err(&pdev->dev, "cannot allocate sglist\n");
889 goto err1;
890 }
891 }
892
893 error = register_framebuffer(info);
850 if (error < 0) 894 if (error < 0)
851 goto err1; 895 goto err1;
852 }
853 896
854 for (i = 0; i < j; i++) {
855 info = &priv->ch[i].info;
856 dev_info(info->dev, 897 dev_info(info->dev,
857 "registered %s/%s as %dx%d %dbpp.\n", 898 "registered %s/%s as %dx%d %dbpp.\n",
858 pdev->name, 899 pdev->name,
859 (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ? 900 (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
860 "mainlcd" : "sublcd", 901 "mainlcd" : "sublcd",
861 (int) priv->ch[i].cfg.lcd_cfg.xres, 902 (int) ch->cfg.lcd_cfg.xres,
862 (int) priv->ch[i].cfg.lcd_cfg.yres, 903 (int) ch->cfg.lcd_cfg.yres,
863 priv->ch[i].cfg.bpp); 904 ch->cfg.bpp);
864 905
865 /* deferred io mode: disable clock to save power */ 906 /* deferred io mode: disable clock to save power */
866 if (info->fbdefio) 907 if (info->fbdefio)
@@ -881,20 +922,24 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
881 int i; 922 int i;
882 923
883 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) 924 for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
884 if (priv->ch[i].info.dev) 925 if (priv->ch[i].info->dev)
885 unregister_framebuffer(&priv->ch[i].info); 926 unregister_framebuffer(priv->ch[i].info);
886 927
887 sh_mobile_lcdc_stop(priv); 928 sh_mobile_lcdc_stop(priv);
888 929
889 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { 930 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
890 info = &priv->ch[i].info; 931 info = priv->ch[i].info;
891 932
892 if (!info->device) 933 if (!info || !info->device)
893 continue; 934 continue;
894 935
936 if (priv->ch[i].sglist)
937 vfree(priv->ch[i].sglist);
938
895 dma_free_coherent(&pdev->dev, info->fix.smem_len, 939 dma_free_coherent(&pdev->dev, info->fix.smem_len,
896 info->screen_base, priv->ch[i].dma_handle); 940 info->screen_base, priv->ch[i].dma_handle);
897 fb_dealloc_cmap(&info->cmap); 941 fb_dealloc_cmap(&info->cmap);
942 framebuffer_release(info);
898 } 943 }
899 944
900#ifdef CONFIG_HAVE_CLK 945#ifdef CONFIG_HAVE_CLK
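The sh_mobile_lcdcfb deferred-io path above now builds a scatterlist from the pages fbdefio hands back and maps it for DMA around the one-shot panel update. A trimmed sketch of that sequence, assuming a preallocated scatterlist sized for the whole framebuffer as in the hunk (and, like the hunk, not checking the dma_map_sg() return value):

#include <linux/fb.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static void example_deferred_io(struct fb_info *info,
				struct list_head *pagelist,
				struct scatterlist *sglist)
{
	unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT;
	unsigned int nr_pages = 0;
	struct page *page;

	sg_init_table(sglist, nr_pages_max);
	list_for_each_entry(page, pagelist, lru)
		sg_set_page(&sglist[nr_pages++], page, PAGE_SIZE, 0);

	/* make the touched pages visible to the LCD controller */
	dma_map_sg(info->dev, sglist, nr_pages, DMA_TO_DEVICE);

	/* ... trigger the panel update here ... */

	dma_unmap_sg(info->dev, sglist, nr_pages, DMA_TO_DEVICE);
}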
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7072d19080d5..4a067f0d0ceb 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
1847 1847
1848 strcpy(fix->id, ivideo->myid); 1848 strcpy(fix->id, ivideo->myid);
1849 1849
1850 mutex_lock(&info->mm_lock);
1850 fix->smem_start = ivideo->video_base + ivideo->video_offset; 1851 fix->smem_start = ivideo->video_base + ivideo->video_offset;
1851 fix->smem_len = ivideo->sisfb_mem; 1852 fix->smem_len = ivideo->sisfb_mem;
1853 mutex_unlock(&info->mm_lock);
1852 fix->type = FB_TYPE_PACKED_PIXELS; 1854 fix->type = FB_TYPE_PACKED_PIXELS;
1853 fix->type_aux = 0; 1855 fix->type_aux = 0;
1854 fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; 1856 fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
@@ -6365,7 +6367,6 @@ error_3: vfree(ivideo->bios_abase);
6365 sis_fb_info->fix = ivideo->sisfb_fix; 6367 sis_fb_info->fix = ivideo->sisfb_fix;
6366 sis_fb_info->screen_base = ivideo->video_vbase + ivideo->video_offset; 6368 sis_fb_info->screen_base = ivideo->video_vbase + ivideo->video_offset;
6367 sis_fb_info->fbops = &sisfb_ops; 6369 sis_fb_info->fbops = &sisfb_ops;
6368 sisfb_get_fix(&sis_fb_info->fix, -1, sis_fb_info);
6369 sis_fb_info->pseudo_palette = ivideo->pseudo_palette; 6370 sis_fb_info->pseudo_palette = ivideo->pseudo_palette;
6370 6371
6371 fb_alloc_cmap(&sis_fb_info->cmap, 256 , 0); 6372 fb_alloc_cmap(&sis_fb_info->cmap, 256 , 0);
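Several framebuffer hunks in this series (sisfb here, sm501fb and w100fb below) start taking info->mm_lock whenever smem_start or smem_len is rewritten, so a concurrent mmap of the framebuffer sees a consistent aperture. Reduced to its core, the pattern is:

#include <linux/fb.h>
#include <linux/mutex.h>

/* Sketch: base/len stand in for whatever driver-specific values were computed. */
static void example_update_aperture(struct fb_info *info,
				    unsigned long base, u32 len)
{
	mutex_lock(&info->mm_lock);
	info->fix.smem_start = base;
	info->fix.smem_len = len;
	mutex_unlock(&info->mm_lock);
}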
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index eb5d73a06702..924d79462780 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
145#define SM501_MEMF_ACCEL (8) 145#define SM501_MEMF_ACCEL (8)
146 146
147static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, 147static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
148 unsigned int why, size_t size) 148 unsigned int why, size_t size, u32 smem_len)
149{ 149{
150 struct sm501fb_par *par; 150 struct sm501fb_par *par;
151 struct fb_info *fbi; 151 struct fb_info *fbi;
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
172 if (ptr > 0) 172 if (ptr > 0)
173 ptr &= ~(PAGE_SIZE - 1); 173 ptr &= ~(PAGE_SIZE - 1);
174 174
175 if (fbi && ptr < fbi->fix.smem_len) 175 if (fbi && ptr < smem_len)
176 return -ENOMEM; 176 return -ENOMEM;
177 177
178 break; 178 break;
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
197 197
198 case SM501_MEMF_ACCEL: 198 case SM501_MEMF_ACCEL:
199 fbi = inf->fb[HEAD_CRT]; 199 fbi = inf->fb[HEAD_CRT];
200 ptr = fbi ? fbi->fix.smem_len : 0; 200 ptr = fbi ? smem_len : 0;
201 201
202 fbi = inf->fb[HEAD_PANEL]; 202 fbi = inf->fb[HEAD_PANEL];
203 if (fbi) { 203 if (fbi) {
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
413 unsigned int mem_type; 413 unsigned int mem_type;
414 unsigned int clock_type; 414 unsigned int clock_type;
415 unsigned int head_addr; 415 unsigned int head_addr;
416 unsigned int smem_len;
416 417
417 dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n", 418 dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
418 __func__, var->xres, var->yres, var->bits_per_pixel, 419 __func__, var->xres, var->yres, var->bits_per_pixel,
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info,
453 454
454 /* allocate fb memory within 501 */ 455 /* allocate fb memory within 501 */
455 info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8; 456 info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
456 info->fix.smem_len = info->fix.line_length * var->yres_virtual; 457 smem_len = info->fix.line_length * var->yres_virtual;
457 458
458 dev_dbg(fbi->dev, "%s: line length = %u\n", __func__, 459 dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
459 info->fix.line_length); 460 info->fix.line_length);
460 461
461 if (sm501_alloc_mem(fbi, &par->screen, mem_type, 462 if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
462 info->fix.smem_len)) {
463 dev_err(fbi->dev, "no memory available\n"); 463 dev_err(fbi->dev, "no memory available\n");
464 return -ENOMEM; 464 return -ENOMEM;
465 } 465 }
466 466
467 mutex_lock(&info->mm_lock);
467 info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr; 468 info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
469 info->fix.smem_len = smem_len;
470 mutex_unlock(&info->mm_lock);
468 471
469 info->screen_base = fbi->fbmem + par->screen.sm_addr; 472 info->screen_base = fbi->fbmem + par->screen.sm_addr;
470 info->screen_size = info->fix.smem_len; 473 info->screen_size = info->fix.smem_len;
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info)
637 if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) { 640 if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
638 /* the head is displaying panel data... */ 641 /* the head is displaying panel data... */
639 642
640 sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0); 643 sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
644 info->fix.smem_len);
641 goto out_update; 645 goto out_update;
642 } 646 }
643 647
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
1289 1293
1290 par->cursor_regs = info->regs + reg_base; 1294 par->cursor_regs = info->regs + reg_base;
1291 1295
1292 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024); 1296 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
1297 fbi->fix.smem_len);
1293 if (ret < 0) 1298 if (ret < 0)
1294 return ret; 1299 return ret;
1295 1300
@@ -1535,9 +1540,6 @@ static int sm501fb_init_fb(struct fb_info *fb,
1535 if (ret) 1540 if (ret)
1536 dev_err(info->dev, "check_var() failed on initial setup?\n"); 1541 dev_err(info->dev, "check_var() failed on initial setup?\n");
1537 1542
1538 /* ensure we've activated our new configuration */
1539 (fb->fbops->fb_set_par)(fb);
1540
1541 return 0; 1543 return 0;
1542} 1544}
1543 1545
@@ -1619,6 +1621,8 @@ static int __devinit sm501fb_start_one(struct sm501fb_info *info,
1619 if (!fbi) 1621 if (!fbi)
1620 return 0; 1622 return 0;
1621 1623
1624 mutex_init(&info->fb[head]->mm_lock);
1625
1622 ret = sm501fb_init_fb(info->fb[head], head, drvname); 1626 ret = sm501fb_init_fb(info->fb[head], head, drvname);
1623 if (ret) { 1627 if (ret) {
1624 dev_err(info->dev, "cannot initialise fb %s\n", drvname); 1628 dev_err(info->dev, "cannot initialise fb %s\n", drvname);
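In the sm501fb hunks, sm501_alloc_mem() now receives the byte size as an explicit smem_len argument instead of reading fbi->fix.smem_len, so set_par can compute the new size first and only publish it to info->fix under mm_lock once the allocation has succeeded. A schematic caller, with made-up names for everything except the fb_info fields:

#include <linux/fb.h>
#include <linux/mutex.h>
#include <linux/errno.h>

/* Placeholder for sm501_alloc_mem(): assumed to return 0 on success. */
static int example_alloc(struct fb_info *info, unsigned long *sm_addr, u32 len);

static int example_set_par_common(struct fb_info *info, unsigned long *sm_addr,
				  u32 line_length, u32 yres_virtual)
{
	u32 smem_len = line_length * yres_virtual;

	if (example_alloc(info, sm_addr, smem_len))
		return -ENOMEM;

	mutex_lock(&info->mm_lock);
	info->fix.smem_len = smem_len;
	mutex_unlock(&info->mm_lock);
	return 0;
}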
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index eec9dcb7f599..6120f0c526fe 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1115,10 +1115,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
1115 if the device name contains the string "DX" and tell the 1115 if the device name contains the string "DX" and tell the
1116 user how to reconfigure the card. */ 1116 user how to reconfigure the card. */
1117 if (strstr(sti->outptr.dev_name, "DX")) { 1117 if (strstr(sti->outptr.dev_name, "DX")) {
1118 printk(KERN_WARNING "WARNING: stifb framebuffer driver does not " 1118 printk(KERN_WARNING
1119 "support '%s' in double-buffer mode.\n" 1119"WARNING: stifb framebuffer driver does not support '%s' in double-buffer mode.\n"
1120 KERN_WARNING "WARNING: Please disable the double-buffer mode " 1120"WARNING: Please disable the double-buffer mode in IPL menu (the PARISC-BIOS).\n",
1121 "in IPL menu (the PARISC-BIOS).\n",
1122 sti->outptr.dev_name); 1121 sti->outptr.dev_name);
1123 goto out_err0; 1122 goto out_err0;
1124 } 1123 }
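The stifb fix above illustrates a general printk rule: a KERN_* marker is a prefix for one message, so concatenating a second KERN_WARNING into the middle of a format string just embeds the level sequence in the output. Continuation text belongs either in the same message or in its own printk call, roughly:

#include <linux/kernel.h>

/* Sketch with generic message text, not the stifb wording. */
static void example_warn(const char *name)
{
	/* one level marker per printk; the second line is its own message */
	printk(KERN_WARNING "driver: '%s' is not supported in this mode\n", name);
	printk(KERN_WARNING "driver: please disable the mode in firmware setup\n");
}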
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index fcd53ceb88fa..c8960003f47d 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -2407,14 +2407,14 @@ int viafb_setmode(int vmode_index, int hor_res, int ver_res, int video_bpp,
2407 viafb_dvi_set_mode(viafb_get_mode_index 2407 viafb_dvi_set_mode(viafb_get_mode_index
2408 (viaparinfo->tmds_setting_info->h_active, 2408 (viaparinfo->tmds_setting_info->h_active,
2409 viaparinfo->tmds_setting_info-> 2409 viaparinfo->tmds_setting_info->
2410 v_active, 1), 2410 v_active),
2411 video_bpp1, viaparinfo-> 2411 video_bpp1, viaparinfo->
2412 tmds_setting_info->iga_path); 2412 tmds_setting_info->iga_path);
2413 } else { 2413 } else {
2414 viafb_dvi_set_mode(viafb_get_mode_index 2414 viafb_dvi_set_mode(viafb_get_mode_index
2415 (viaparinfo->tmds_setting_info->h_active, 2415 (viaparinfo->tmds_setting_info->h_active,
2416 viaparinfo-> 2416 viaparinfo->
2417 tmds_setting_info->v_active, 0), 2417 tmds_setting_info->v_active),
2418 video_bpp, viaparinfo-> 2418 video_bpp, viaparinfo->
2419 tmds_setting_info->iga_path); 2419 tmds_setting_info->iga_path);
2420 } 2420 }
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 6c7290a6a447..78c6b3387947 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -580,10 +580,7 @@ static void load_lcd_k400_patch_tbl(int set_hres, int set_vres,
580 int reg_num = 0; 580 int reg_num = 0;
581 struct io_reg *lcd_patch_reg = NULL; 581 struct io_reg *lcd_patch_reg = NULL;
582 582
583 if (viaparinfo->lvds_setting_info->iga_path == IGA2) 583 vmode_index = viafb_get_mode_index(set_hres, set_vres);
584 vmode_index = viafb_get_mode_index(set_hres, set_vres, 1);
585 else
586 vmode_index = viafb_get_mode_index(set_hres, set_vres, 0);
587 switch (panel_id) { 584 switch (panel_id) {
588 /* LCD 800x600 */ 585 /* LCD 800x600 */
589 case LCD_PANEL_ID1_800X600: 586 case LCD_PANEL_ID1_800X600:
@@ -761,10 +758,7 @@ static void load_lcd_p880_patch_tbl(int set_hres, int set_vres,
761 int reg_num = 0; 758 int reg_num = 0;
762 struct io_reg *lcd_patch_reg = NULL; 759 struct io_reg *lcd_patch_reg = NULL;
763 760
764 if (viaparinfo->lvds_setting_info->iga_path == IGA2) 761 vmode_index = viafb_get_mode_index(set_hres, set_vres);
765 vmode_index = viafb_get_mode_index(set_hres, set_vres, 1);
766 else
767 vmode_index = viafb_get_mode_index(set_hres, set_vres, 0);
768 762
769 switch (panel_id) { 763 switch (panel_id) {
770 case LCD_PANEL_ID5_1400X1050: 764 case LCD_PANEL_ID5_1400X1050:
@@ -832,10 +826,7 @@ static void load_lcd_patch_regs(int set_hres, int set_vres,
832{ 826{
833 int vmode_index; 827 int vmode_index;
834 828
835 if (viaparinfo->lvds_setting_info->iga_path == IGA2) 829 vmode_index = viafb_get_mode_index(set_hres, set_vres);
836 vmode_index = viafb_get_mode_index(set_hres, set_vres, 1);
837 else
838 vmode_index = viafb_get_mode_index(set_hres, set_vres, 0);
839 830
840 viafb_unlock_crt(); 831 viafb_unlock_crt();
841 832
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index a0fec298216e..72833f3334b5 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -32,7 +32,6 @@ static u32 pseudo_pal[17];
32/* video mode */ 32/* video mode */
33static char *viafb_mode = "640x480"; 33static char *viafb_mode = "640x480";
34static char *viafb_mode1 = "640x480"; 34static char *viafb_mode1 = "640x480";
35static int viafb_resMode = VIA_RES_640X480;
36 35
37/* Added for specifying active devices.*/ 36/* Added for specifying active devices.*/
38char *viafb_active_dev = ""; 37char *viafb_active_dev = "";
@@ -56,47 +55,47 @@ static void viafb_get_video_device(u32 *video_dev_info);
56 55
57/* Mode information */ 56/* Mode information */
58static const struct viafb_modeinfo viafb_modentry[] = { 57static const struct viafb_modeinfo viafb_modentry[] = {
59 {480, 640, VIA_RES_480X640, "480x640"}, 58 {480, 640, VIA_RES_480X640},
60 {640, 480, VIA_RES_640X480, "640x480"}, 59 {640, 480, VIA_RES_640X480},
61 {800, 480, VIA_RES_800X480, "800x480"}, 60 {800, 480, VIA_RES_800X480},
62 {800, 600, VIA_RES_800X600, "800x600"}, 61 {800, 600, VIA_RES_800X600},
63 {1024, 768, VIA_RES_1024X768, "1024x768"}, 62 {1024, 768, VIA_RES_1024X768},
64 {1152, 864, VIA_RES_1152X864, "1152x864"}, 63 {1152, 864, VIA_RES_1152X864},
65 {1280, 1024, VIA_RES_1280X1024, "1280x1024"}, 64 {1280, 1024, VIA_RES_1280X1024},
66 {1600, 1200, VIA_RES_1600X1200, "1600x1200"}, 65 {1600, 1200, VIA_RES_1600X1200},
67 {1440, 1050, VIA_RES_1440X1050, "1440x1050"}, 66 {1440, 1050, VIA_RES_1440X1050},
68 {1280, 768, VIA_RES_1280X768, "1280x768"}, 67 {1280, 768, VIA_RES_1280X768,},
69 {1280, 800, VIA_RES_1280X800, "1280x800"}, 68 {1280, 800, VIA_RES_1280X800},
70 {1280, 960, VIA_RES_1280X960, "1280x960"}, 69 {1280, 960, VIA_RES_1280X960},
71 {1920, 1440, VIA_RES_1920X1440, "1920x1440"}, 70 {1920, 1440, VIA_RES_1920X1440},
72 {848, 480, VIA_RES_848X480, "848x480"}, 71 {848, 480, VIA_RES_848X480},
73 {1400, 1050, VIA_RES_1400X1050, "1400x1050"}, 72 {1400, 1050, VIA_RES_1400X1050},
74 {720, 480, VIA_RES_720X480, "720x480"}, 73 {720, 480, VIA_RES_720X480},
75 {720, 576, VIA_RES_720X576, "720x576"}, 74 {720, 576, VIA_RES_720X576},
76 {1024, 512, VIA_RES_1024X512, "1024x512"}, 75 {1024, 512, VIA_RES_1024X512},
77 {1024, 576, VIA_RES_1024X576, "1024x576"}, 76 {1024, 576, VIA_RES_1024X576},
78 {1024, 600, VIA_RES_1024X600, "1024x600"}, 77 {1024, 600, VIA_RES_1024X600},
79 {1280, 720, VIA_RES_1280X720, "1280x720"}, 78 {1280, 720, VIA_RES_1280X720},
80 {1920, 1080, VIA_RES_1920X1080, "1920x1080"}, 79 {1920, 1080, VIA_RES_1920X1080},
81 {1366, 768, VIA_RES_1368X768, "1368x768"}, 80 {1366, 768, VIA_RES_1368X768},
82 {1680, 1050, VIA_RES_1680X1050, "1680x1050"}, 81 {1680, 1050, VIA_RES_1680X1050},
83 {960, 600, VIA_RES_960X600, "960x600"}, 82 {960, 600, VIA_RES_960X600},
84 {1000, 600, VIA_RES_1000X600, "1000x600"}, 83 {1000, 600, VIA_RES_1000X600},
85 {1024, 576, VIA_RES_1024X576, "1024x576"}, 84 {1024, 576, VIA_RES_1024X576},
86 {1024, 600, VIA_RES_1024X600, "1024x600"}, 85 {1024, 600, VIA_RES_1024X600},
87 {1088, 612, VIA_RES_1088X612, "1088x612"}, 86 {1088, 612, VIA_RES_1088X612},
88 {1152, 720, VIA_RES_1152X720, "1152x720"}, 87 {1152, 720, VIA_RES_1152X720},
89 {1200, 720, VIA_RES_1200X720, "1200x720"}, 88 {1200, 720, VIA_RES_1200X720},
90 {1280, 600, VIA_RES_1280X600, "1280x600"}, 89 {1280, 600, VIA_RES_1280X600},
91 {1360, 768, VIA_RES_1360X768, "1360x768"}, 90 {1360, 768, VIA_RES_1360X768},
92 {1440, 900, VIA_RES_1440X900, "1440x900"}, 91 {1440, 900, VIA_RES_1440X900},
93 {1600, 900, VIA_RES_1600X900, "1600x900"}, 92 {1600, 900, VIA_RES_1600X900},
94 {1600, 1024, VIA_RES_1600X1024, "1600x1024"}, 93 {1600, 1024, VIA_RES_1600X1024},
95 {1792, 1344, VIA_RES_1792X1344, "1792x1344"}, 94 {1792, 1344, VIA_RES_1792X1344},
96 {1856, 1392, VIA_RES_1856X1392, "1856x1392"}, 95 {1856, 1392, VIA_RES_1856X1392},
97 {1920, 1200, VIA_RES_1920X1200, "1920x1200"}, 96 {1920, 1200, VIA_RES_1920X1200},
98 {2048, 1536, VIA_RES_2048X1536, "2048x1536"}, 97 {2048, 1536, VIA_RES_2048X1536},
99 {0, 0, VIA_RES_INVALID, "640x480"} 98 {0, 0, VIA_RES_INVALID}
100}; 99};
101 100
102static struct fb_ops viafb_ops; 101static struct fb_ops viafb_ops;
@@ -177,7 +176,7 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
177 if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE) 176 if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE)
178 return -EINVAL; 177 return -EINVAL;
179 178
180 vmode_index = viafb_get_mode_index(var->xres, var->yres, 0); 179 vmode_index = viafb_get_mode_index(var->xres, var->yres);
181 if (vmode_index == VIA_RES_INVALID) { 180 if (vmode_index == VIA_RES_INVALID) {
182 DEBUG_MSG(KERN_INFO 181 DEBUG_MSG(KERN_INFO
183 "viafb: Mode %dx%dx%d not supported!!\n", 182 "viafb: Mode %dx%dx%d not supported!!\n",
@@ -233,14 +232,14 @@ static int viafb_set_par(struct fb_info *info)
233 viafb_update_device_setting(info->var.xres, info->var.yres, 232 viafb_update_device_setting(info->var.xres, info->var.yres,
234 info->var.bits_per_pixel, viafb_refresh, 0); 233 info->var.bits_per_pixel, viafb_refresh, 0);
235 234
236 vmode_index = viafb_get_mode_index(info->var.xres, info->var.yres, 0); 235 vmode_index = viafb_get_mode_index(info->var.xres, info->var.yres);
237 236
238 if (viafb_SAMM_ON == 1) { 237 if (viafb_SAMM_ON == 1) {
239 DEBUG_MSG(KERN_INFO 238 DEBUG_MSG(KERN_INFO
240 "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n", 239 "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n",
241 viafb_second_xres, viafb_second_yres, viafb_bpp1); 240 viafb_second_xres, viafb_second_yres, viafb_bpp1);
242 vmode_index1 = viafb_get_mode_index(viafb_second_xres, 241 vmode_index1 = viafb_get_mode_index(viafb_second_xres,
243 viafb_second_yres, 1); 242 viafb_second_yres);
244 DEBUG_MSG(KERN_INFO "->viafb_SAMM_ON: index=%d\n", 243 DEBUG_MSG(KERN_INFO "->viafb_SAMM_ON: index=%d\n",
245 vmode_index1); 244 vmode_index1);
246 245
@@ -1262,7 +1261,7 @@ static int viafb_sync(struct fb_info *info)
1262 return 0; 1261 return 0;
1263} 1262}
1264 1263
1265int viafb_get_mode_index(int hres, int vres, int flag) 1264int viafb_get_mode_index(int hres, int vres)
1266{ 1265{
1267 u32 i; 1266 u32 i;
1268 DEBUG_MSG(KERN_INFO "viafb_get_mode_index!\n"); 1267 DEBUG_MSG(KERN_INFO "viafb_get_mode_index!\n");
@@ -1272,13 +1271,7 @@ int viafb_get_mode_index(int hres, int vres, int flag)
1272 viafb_modentry[i].yres == vres) 1271 viafb_modentry[i].yres == vres)
1273 break; 1272 break;
1274 1273
1275 viafb_resMode = viafb_modentry[i].mode_index; 1274 return viafb_modentry[i].mode_index;
1276 if (flag)
1277 viafb_mode1 = viafb_modentry[i].mode_res;
1278 else
1279 viafb_mode = viafb_modentry[i].mode_res;
1280
1281 return viafb_resMode;
1282} 1275}
1283 1276
1284static void check_available_device_to_enable(int device_id) 1277static void check_available_device_to_enable(int device_id)
@@ -2199,7 +2192,7 @@ static int __devinit via_pci_probe(void)
2199 strict_strtoul(tmpc, 0, &default_xres); 2192 strict_strtoul(tmpc, 0, &default_xres);
2200 strict_strtoul(tmpm, 0, &default_yres); 2193 strict_strtoul(tmpm, 0, &default_yres);
2201 2194
2202 vmode_index = viafb_get_mode_index(default_xres, default_yres, 0); 2195 vmode_index = viafb_get_mode_index(default_xres, default_yres);
2203 DEBUG_MSG(KERN_INFO "0->index=%d\n", vmode_index); 2196 DEBUG_MSG(KERN_INFO "0->index=%d\n", vmode_index);
2204 2197
2205 if (viafb_SAMM_ON == 1) { 2198 if (viafb_SAMM_ON == 1) {
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index a4158e872878..227b000feb38 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -81,7 +81,6 @@ struct viafb_modeinfo {
81 u32 xres; 81 u32 xres;
82 u32 yres; 82 u32 yres;
83 int mode_index; 83 int mode_index;
84 char *mode_res;
85}; 84};
86extern unsigned int viafb_second_virtual_yres; 85extern unsigned int viafb_second_virtual_yres;
87extern unsigned int viafb_second_virtual_xres; 86extern unsigned int viafb_second_virtual_xres;
@@ -102,7 +101,7 @@ extern int strict_strtoul(const char *cp, unsigned int base,
102void viafb_memory_pitch_patch(struct fb_info *info); 101void viafb_memory_pitch_patch(struct fb_info *info);
103void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh, 102void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
104 int mode_index); 103 int mode_index);
105int viafb_get_mode_index(int hres, int vres, int flag); 104int viafb_get_mode_index(int hres, int vres);
106u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information 105u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
107 *plvds_setting_info, struct lvds_chip_information 106 *plvds_setting_info, struct lvds_chip_information
108 *plvds_chip_info, u8 index); 107 *plvds_chip_info, u8 index);
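After the viafb hunks above, viafb_get_mode_index() is a pure table lookup: the flag argument, the viafb_resMode global and the duplicated mode-name strings are gone. Functionally it reduces to the following standalone program, with the table truncated to a few illustrative entries:

#include <stdio.h>

enum { VIA_RES_640X480, VIA_RES_800X600, VIA_RES_1024X768, VIA_RES_INVALID };

struct modeinfo {
	unsigned int xres, yres;
	int mode_index;
};

static const struct modeinfo modentry[] = {
	{  640,  480, VIA_RES_640X480 },
	{  800,  600, VIA_RES_800X600 },
	{ 1024,  768, VIA_RES_1024X768 },
	{    0,    0, VIA_RES_INVALID },	/* sentinel, as in the driver table */
};

static int get_mode_index(int hres, int vres)
{
	unsigned int i;

	for (i = 0; modentry[i].xres != 0; i++)
		if (modentry[i].xres == hres && modentry[i].yres == vres)
			break;
	return modentry[i].mode_index;	/* sentinel index on a miss */
}

int main(void)
{
	printf("%d %d\n", get_mode_index(800, 600), get_mode_index(1, 1));
	return 0;
}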
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index d0674f1e3f10..2376f688ec8b 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info)
523 info->fix.ywrapstep = 0; 523 info->fix.ywrapstep = 0;
524 info->fix.line_length = par->xres * BITS_PER_PIXEL / 8; 524 info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
525 525
526 mutex_lock(&info->mm_lock);
526 if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) { 527 if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
527 par->extmem_active = 1; 528 par->extmem_active = 1;
528 info->fix.smem_len = par->mach->mem->size+1; 529 info->fix.smem_len = par->mach->mem->size+1;
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info)
530 par->extmem_active = 0; 531 par->extmem_active = 0;
531 info->fix.smem_len = MEM_INT_SIZE+1; 532 info->fix.smem_len = MEM_INT_SIZE+1;
532 } 533 }
534 mutex_unlock(&info->mm_lock);
533 535
534 w100fb_activate_var(par); 536 w100fb_activate_var(par);
535 } 537 }
@@ -746,8 +748,6 @@ int __init w100fb_probe(struct platform_device *pdev)
746 goto out; 748 goto out;
747 } 749 }
748 750
749 w100fb_set_par(info);
750
751 if (register_framebuffer(info) < 0) { 751 if (register_framebuffer(info) < 0) {
752 err = -EINVAL; 752 err = -EINVAL;
753 goto out; 753 goto out;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 193c8f0e5cc5..248e00ec4dc1 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -52,8 +52,10 @@ struct virtio_pci_device
52 char (*msix_names)[256]; 52 char (*msix_names)[256];
53 /* Number of available vectors */ 53 /* Number of available vectors */
54 unsigned msix_vectors; 54 unsigned msix_vectors;
55 /* Vectors allocated */ 55 /* Vectors allocated, excluding per-vq vectors if any */
56 unsigned msix_used_vectors; 56 unsigned msix_used_vectors;
57 /* Whether we have vector per vq */
58 bool per_vq_vectors;
57}; 59};
58 60
59/* Constants for MSI-X */ 61/* Constants for MSI-X */
@@ -258,7 +260,6 @@ static void vp_free_vectors(struct virtio_device *vdev)
258 260
259 for (i = 0; i < vp_dev->msix_used_vectors; ++i) 261 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
260 free_irq(vp_dev->msix_entries[i].vector, vp_dev); 262 free_irq(vp_dev->msix_entries[i].vector, vp_dev);
261 vp_dev->msix_used_vectors = 0;
262 263
263 if (vp_dev->msix_enabled) { 264 if (vp_dev->msix_enabled) {
264 /* Disable the vector used for configuration */ 265 /* Disable the vector used for configuration */
@@ -267,80 +268,77 @@ static void vp_free_vectors(struct virtio_device *vdev)
267 /* Flush the write out to device */ 268 /* Flush the write out to device */
268 ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); 269 ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
269 270
270 vp_dev->msix_enabled = 0;
271 pci_disable_msix(vp_dev->pci_dev); 271 pci_disable_msix(vp_dev->pci_dev);
272 vp_dev->msix_enabled = 0;
273 vp_dev->msix_vectors = 0;
272 } 274 }
273}
274 275
275static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries, 276 vp_dev->msix_used_vectors = 0;
276 int *options, int noptions) 277 kfree(vp_dev->msix_names);
277{ 278 vp_dev->msix_names = NULL;
278 int i; 279 kfree(vp_dev->msix_entries);
279 for (i = 0; i < noptions; ++i) 280 vp_dev->msix_entries = NULL;
280 if (!pci_enable_msix(dev, entries, options[i]))
281 return options[i];
282 return -EBUSY;
283} 281}
284 282
285static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) 283static int vp_request_vectors(struct virtio_device *vdev, int nvectors,
284 bool per_vq_vectors)
286{ 285{
287 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 286 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
288 const char *name = dev_name(&vp_dev->vdev.dev); 287 const char *name = dev_name(&vp_dev->vdev.dev);
289 unsigned i, v; 288 unsigned i, v;
290 int err = -ENOMEM; 289 int err = -ENOMEM;
291 /* We want at most one vector per queue and one for config changes. 290
292 * Fallback to separate vectors for config and a shared for queues. 291 if (!nvectors) {
293 * Finally fall back to regular interrupts. */ 292 /* Can't allocate MSI-X vectors, use regular interrupt */
294 int options[] = { max_vqs + 1, 2 }; 293 vp_dev->msix_vectors = 0;
295 int nvectors = max(options[0], options[1]); 294 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
295 IRQF_SHARED, name, vp_dev);
296 if (err)
297 return err;
298 vp_dev->intx_enabled = 1;
299 return 0;
300 }
296 301
297 vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, 302 vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
298 GFP_KERNEL); 303 GFP_KERNEL);
299 if (!vp_dev->msix_entries) 304 if (!vp_dev->msix_entries)
300 goto error_entries; 305 goto error;
301 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, 306 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
302 GFP_KERNEL); 307 GFP_KERNEL);
303 if (!vp_dev->msix_names) 308 if (!vp_dev->msix_names)
304 goto error_names; 309 goto error;
305 310
306 for (i = 0; i < nvectors; ++i) 311 for (i = 0; i < nvectors; ++i)
307 vp_dev->msix_entries[i].entry = i; 312 vp_dev->msix_entries[i].entry = i;
308 313
309 err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, 314 err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
310 options, ARRAY_SIZE(options)); 315 if (err > 0)
311 if (err < 0) { 316 err = -ENOSPC;
312 /* Can't allocate enough MSI-X vectors, use regular interrupt */ 317 if (err)
313 vp_dev->msix_vectors = 0; 318 goto error;
314 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, 319 vp_dev->msix_vectors = nvectors;
315 IRQF_SHARED, name, vp_dev); 320 vp_dev->msix_enabled = 1;
316 if (err) 321
317 goto error_irq; 322 /* Set the vector used for configuration */
318 vp_dev->intx_enabled = 1; 323 v = vp_dev->msix_used_vectors;
319 } else { 324 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
320 vp_dev->msix_vectors = err; 325 "%s-config", name);
321 vp_dev->msix_enabled = 1; 326 err = request_irq(vp_dev->msix_entries[v].vector,
322 327 vp_config_changed, 0, vp_dev->msix_names[v],
323 /* Set the vector used for configuration */ 328 vp_dev);
324 v = vp_dev->msix_used_vectors; 329 if (err)
325 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, 330 goto error;
326 "%s-config", name); 331 ++vp_dev->msix_used_vectors;
327 err = request_irq(vp_dev->msix_entries[v].vector, 332
328 vp_config_changed, 0, vp_dev->msix_names[v], 333 iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
329 vp_dev); 334 /* Verify we had enough resources to assign the vector */
330 if (err) 335 v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
331 goto error_irq; 336 if (v == VIRTIO_MSI_NO_VECTOR) {
332 ++vp_dev->msix_used_vectors; 337 err = -EBUSY;
333 338 goto error;
334 iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
335 /* Verify we had enough resources to assign the vector */
336 v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
337 if (v == VIRTIO_MSI_NO_VECTOR) {
338 err = -EBUSY;
339 goto error_irq;
340 }
341 } 339 }
342 340
343 if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) { 341 if (!per_vq_vectors) {
344 /* Shared vector for all VQs */ 342 /* Shared vector for all VQs */
345 v = vp_dev->msix_used_vectors; 343 v = vp_dev->msix_used_vectors;
346 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, 344 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
@@ -349,28 +347,25 @@ static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
349 vp_vring_interrupt, 0, vp_dev->msix_names[v], 347 vp_vring_interrupt, 0, vp_dev->msix_names[v],
350 vp_dev); 348 vp_dev);
351 if (err) 349 if (err)
352 goto error_irq; 350 goto error;
353 ++vp_dev->msix_used_vectors; 351 ++vp_dev->msix_used_vectors;
354 } 352 }
355 return 0; 353 return 0;
356error_irq: 354error:
357 vp_free_vectors(vdev); 355 vp_free_vectors(vdev);
358 kfree(vp_dev->msix_names);
359error_names:
360 kfree(vp_dev->msix_entries);
361error_entries:
362 return err; 356 return err;
363} 357}
364 358
365static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, 359static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
366 void (*callback)(struct virtqueue *vq), 360 void (*callback)(struct virtqueue *vq),
367 const char *name) 361 const char *name,
362 u16 vector)
368{ 363{
369 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 364 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
370 struct virtio_pci_vq_info *info; 365 struct virtio_pci_vq_info *info;
371 struct virtqueue *vq; 366 struct virtqueue *vq;
372 unsigned long flags, size; 367 unsigned long flags, size;
373 u16 num, vector; 368 u16 num;
374 int err; 369 int err;
375 370
376 /* Select the queue we're interested in */ 371 /* Select the queue we're interested in */
@@ -389,7 +384,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
389 384
390 info->queue_index = index; 385 info->queue_index = index;
391 info->num = num; 386 info->num = num;
392 info->vector = VIRTIO_MSI_NO_VECTOR; 387 info->vector = vector;
393 388
394 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); 389 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
395 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); 390 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -413,22 +408,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
413 vq->priv = info; 408 vq->priv = info;
414 info->vq = vq; 409 info->vq = vq;
415 410
416 /* allocate per-vq vector if available and necessary */ 411 if (vector != VIRTIO_MSI_NO_VECTOR) {
417 if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
418 vector = vp_dev->msix_used_vectors;
419 snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
420 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
421 err = request_irq(vp_dev->msix_entries[vector].vector,
422 vring_interrupt, 0,
423 vp_dev->msix_names[vector], vq);
424 if (err)
425 goto out_request_irq;
426 info->vector = vector;
427 ++vp_dev->msix_used_vectors;
428 } else
429 vector = VP_MSIX_VQ_VECTOR;
430
431 if (callback && vp_dev->msix_enabled) {
432 iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); 412 iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
433 vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); 413 vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
434 if (vector == VIRTIO_MSI_NO_VECTOR) { 414 if (vector == VIRTIO_MSI_NO_VECTOR) {
@@ -444,11 +424,6 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
444 return vq; 424 return vq;
445 425
446out_assign: 426out_assign:
447 if (info->vector != VIRTIO_MSI_NO_VECTOR) {
448 free_irq(vp_dev->msix_entries[info->vector].vector, vq);
449 --vp_dev->msix_used_vectors;
450 }
451out_request_irq:
452 vring_del_virtqueue(vq); 427 vring_del_virtqueue(vq);
453out_activate_queue: 428out_activate_queue:
454 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 429 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
@@ -462,12 +437,13 @@ static void vp_del_vq(struct virtqueue *vq)
462{ 437{
463 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 438 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
464 struct virtio_pci_vq_info *info = vq->priv; 439 struct virtio_pci_vq_info *info = vq->priv;
465 unsigned long size; 440 unsigned long flags, size;
466 441
467 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); 442 spin_lock_irqsave(&vp_dev->lock, flags);
443 list_del(&info->node);
444 spin_unlock_irqrestore(&vp_dev->lock, flags);
468 445
469 if (info->vector != VIRTIO_MSI_NO_VECTOR) 446 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
470 free_irq(vp_dev->msix_entries[info->vector].vector, vq);
471 447
472 if (vp_dev->msix_enabled) { 448 if (vp_dev->msix_enabled) {
473 iowrite16(VIRTIO_MSI_NO_VECTOR, 449 iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -489,36 +465,62 @@ static void vp_del_vq(struct virtqueue *vq)
489/* the config->del_vqs() implementation */ 465/* the config->del_vqs() implementation */
490static void vp_del_vqs(struct virtio_device *vdev) 466static void vp_del_vqs(struct virtio_device *vdev)
491{ 467{
468 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
492 struct virtqueue *vq, *n; 469 struct virtqueue *vq, *n;
470 struct virtio_pci_vq_info *info;
493 471
494 list_for_each_entry_safe(vq, n, &vdev->vqs, list) 472 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
473 info = vq->priv;
474 if (vp_dev->per_vq_vectors)
475 free_irq(vp_dev->msix_entries[info->vector].vector, vq);
495 vp_del_vq(vq); 476 vp_del_vq(vq);
477 }
478 vp_dev->per_vq_vectors = false;
496 479
497 vp_free_vectors(vdev); 480 vp_free_vectors(vdev);
498} 481}
499 482
500/* the config->find_vqs() implementation */ 483static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
501static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, 484 struct virtqueue *vqs[],
502 struct virtqueue *vqs[], 485 vq_callback_t *callbacks[],
503 vq_callback_t *callbacks[], 486 const char *names[],
504 const char *names[]) 487 int nvectors,
488 bool per_vq_vectors)
505{ 489{
506 int vectors = 0; 490 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
507 int i, err; 491 u16 vector;
508 492 int i, err, allocated_vectors;
509 /* How many vectors would we like? */
510 for (i = 0; i < nvqs; ++i)
511 if (callbacks[i])
512 ++vectors;
513 493
514 err = vp_request_vectors(vdev, vectors); 494 err = vp_request_vectors(vdev, nvectors, per_vq_vectors);
515 if (err) 495 if (err)
516 goto error_request; 496 goto error_request;
517 497
498 vp_dev->per_vq_vectors = per_vq_vectors;
499 allocated_vectors = vp_dev->msix_used_vectors;
518 for (i = 0; i < nvqs; ++i) { 500 for (i = 0; i < nvqs; ++i) {
519 vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); 501 if (!callbacks[i] || !vp_dev->msix_enabled)
520 if (IS_ERR(vqs[i])) 502 vector = VIRTIO_MSI_NO_VECTOR;
503 else if (vp_dev->per_vq_vectors)
504 vector = allocated_vectors++;
505 else
506 vector = VP_MSIX_VQ_VECTOR;
507 vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i], vector);
508 if (IS_ERR(vqs[i])) {
509 err = PTR_ERR(vqs[i]);
521 goto error_find; 510 goto error_find;
511 }
512 /* allocate per-vq irq if available and necessary */
513 if (vp_dev->per_vq_vectors && vector != VIRTIO_MSI_NO_VECTOR) {
514 snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
515 "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]);
516 err = request_irq(vp_dev->msix_entries[vector].vector,
517 vring_interrupt, 0,
518 vp_dev->msix_names[vector], vqs[i]);
519 if (err) {
520 vp_del_vq(vqs[i]);
521 goto error_find;
522 }
523 }
522 } 524 }
523 return 0; 525 return 0;
524 526
@@ -526,7 +528,37 @@ error_find:
526 vp_del_vqs(vdev); 528 vp_del_vqs(vdev);
527 529
528error_request: 530error_request:
529 return PTR_ERR(vqs[i]); 531 return err;
532}
533
534/* the config->find_vqs() implementation */
535static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
536 struct virtqueue *vqs[],
537 vq_callback_t *callbacks[],
538 const char *names[])
539{
540 int vectors = 0;
541 int i, uninitialized_var(err);
542
543 /* How many vectors would we like? */
544 for (i = 0; i < nvqs; ++i)
545 if (callbacks[i])
546 ++vectors;
547
548 /* We want at most one vector per queue and one for config changes. */
549 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
550 vectors + 1, true);
551 if (!err)
552 return 0;
553 /* Fallback to separate vectors for config and a shared for queues. */
554 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
555 2, false);
556 if (!err)
557 return 0;
558 /* Finally fall back to regular interrupts. */
559 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
560 0, false);
561 return err;
530} 562}
531 563
532static struct virtio_config_ops virtio_pci_config_ops = { 564static struct virtio_config_ops virtio_pci_config_ops = {
@@ -669,7 +701,7 @@ static int __init virtio_pci_init(void)
669 701
670 err = pci_register_driver(&virtio_pci_driver); 702 err = pci_register_driver(&virtio_pci_driver);
671 if (err) 703 if (err)
672 device_unregister(virtio_pci_root); 704 root_device_unregister(virtio_pci_root);
673 705
674 return err; 706 return err;
675} 707}
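The reorganised virtio_pci code concentrates the interrupt policy in vp_find_vqs(): first try one MSI-X vector per virtqueue plus one for configuration changes, then a single shared queue vector plus the config vector, and finally plain INTx. The control flow, stripped of the PCI details and treating vp_try_to_find_vqs() from the hunk above as an opaque helper:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char *names[],
			      int nvectors, bool per_vq_vectors);

static int example_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			    struct virtqueue *vqs[],
			    vq_callback_t *callbacks[],
			    const char *names[])
{
	int i, err, vectors = 0;

	/* count the virtqueues that actually want a callback */
	for (i = 0; i < nvqs; ++i)
		if (callbacks[i])
			++vectors;

	/* best case: one vector per vq plus one for config changes */
	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
				 vectors + 1, true);
	if (!err)
		return 0;
	/* second choice: one shared vq vector plus the config vector */
	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, 2, false);
	if (!err)
		return 0;
	/* last resort: no MSI-X at all, fall back to the INTx handler */
	return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, 0, false);
}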
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
index f6542211db48..a9efb1625321 100644
--- a/drivers/vlynq/Kconfig
+++ b/drivers/vlynq/Kconfig
@@ -13,7 +13,7 @@ config VLYNQ
13 13
14config VLYNQ_DEBUG 14config VLYNQ_DEBUG
15 bool "VLYNQ bus debug" 15 bool "VLYNQ bus debug"
16 depends on VLYNQ && KERNEL_DEBUG 16 depends on VLYNQ && DEBUG_KERNEL
17 help 17 help
18 Turn on VLYNQ bus debugging. 18 Turn on VLYNQ bus debugging.
19 19
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index 7335433b067b..f05d2a368367 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -76,7 +76,7 @@ struct vlynq_regs {
76 u32 int_device[8]; 76 u32 int_device[8];
77}; 77};
78 78
79#ifdef VLYNQ_DEBUG 79#ifdef CONFIG_VLYNQ_DEBUG
80static void vlynq_dump_regs(struct vlynq_device *dev) 80static void vlynq_dump_regs(struct vlynq_device *dev)
81{ 81{
82 int i; 82 int i;
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index a7e3b706b9d3..0d92969404c3 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -687,6 +687,7 @@ static int omap_hdq_remove(struct platform_device *pdev)
687 687
688 if (hdq_data->hdq_usecount) { 688 if (hdq_data->hdq_usecount) {
689 dev_dbg(&pdev->dev, "removed when use count is not zero\n"); 689 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
690 mutex_unlock(&hdq_data->hdq_mutex);
690 return -EBUSY; 691 return -EBUSY;
691 } 692 }
692 693
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 5c7011cda6a6..751c003864ad 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -161,7 +161,7 @@ static long bcm47xx_wdt_ioctl(struct file *file,
161{ 161{
162 void __user *argp = (void __user *)arg; 162 void __user *argp = (void __user *)arg;
163 int __user *p = argp; 163 int __user *p = argp;
164 int new_value, retval = -EINVAL;; 164 int new_value, retval = -EINVAL;
165 165
166 switch (cmd) { 166 switch (cmd) {
167 case WDIOC_GETSUPPORT: 167 case WDIOC_GETSUPPORT:
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index fecb307d28e9..aec7cefdef21 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -18,6 +18,7 @@
18#include <linux/bitops.h> 18#include <linux/bitops.h>
19#include <linux/uaccess.h> 19#include <linux/uaccess.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/delay.h>
21 22
22#define DRV_NAME "WDOG COH 901 327" 23#define DRV_NAME "WDOG COH 901 327"
23 24
@@ -92,6 +93,8 @@ static struct clk *clk;
92static void coh901327_enable(u16 timeout) 93static void coh901327_enable(u16 timeout)
93{ 94{
94 u16 val; 95 u16 val;
96 unsigned long freq;
97 unsigned long delay_ns;
95 98
96 clk_enable(clk); 99 clk_enable(clk);
97 /* Restart timer if it is disabled */ 100 /* Restart timer if it is disabled */
@@ -102,6 +105,14 @@ static void coh901327_enable(u16 timeout)
102 /* Acknowledge any pending interrupt so it doesn't just fire off */ 105 /* Acknowledge any pending interrupt so it doesn't just fire off */
103 writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, 106 writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
104 virtbase + U300_WDOG_IER); 107 virtbase + U300_WDOG_IER);
108 /*
109 * The interrupt is cleared in the 32 kHz clock domain.
110 * Wait 3 32 kHz cycles for it to take effect
111 */
112 freq = clk_get_rate(clk);
113 delay_ns = (1000000000 + freq - 1) / freq; /* Freq to ns and round up */
114 delay_ns = 3 * delay_ns; /* Wait 3 cycles */
115 ndelay(delay_ns);
105 /* Enable the watchdog interrupt */ 116 /* Enable the watchdog interrupt */
106 writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR); 117 writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR);
107 /* Activate the watchdog timer */ 118 /* Activate the watchdog timer */
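The coh901327 change waits three cycles of the 32 kHz watchdog clock after acknowledging the interrupt. The conversion from a clock rate to a rounded-up per-cycle delay in nanoseconds is plain integer arithmetic, shown standalone below; the 32768 Hz rate is the expected case, not something read from hardware here:

#include <stdio.h>

int main(void)
{
	unsigned long freq = 32768;	/* assumed watchdog clock rate, Hz */
	unsigned long cycle_ns, delay_ns;

	cycle_ns = (1000000000UL + freq - 1) / freq;	/* ns per cycle, rounded up */
	delay_ns = 3 * cycle_ns;			/* wait three cycles */

	printf("one cycle = %lu ns, delay = %lu ns\n", cycle_ns, delay_ns);
	return 0;
}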
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index e9f950ff86ea..cdd55e0d09f8 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -29,6 +29,7 @@
29#include <linux/watchdog.h> 29#include <linux/watchdog.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/io.h>
32#include <mach/hardware.h> 33#include <mach/hardware.h>
33 34
34#define WDT_VERSION "0.3" 35#define WDT_VERSION "0.3"
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 00b03eb43bf0..e1c82769b08e 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -66,7 +66,7 @@ static inline void ks8695_wdt_stop(void)
66static inline void ks8695_wdt_start(void) 66static inline void ks8695_wdt_start(void)
67{ 67{
68 unsigned long tmcon; 68 unsigned long tmcon;
69 unsigned long tval = wdt_time * CLOCK_TICK_RATE; 69 unsigned long tval = wdt_time * KS8695_CLOCK_RATE;
70 70
71 spin_lock(&ks8695_lock); 71 spin_lock(&ks8695_lock);
72 /* disable timer0 */ 72 /* disable timer0 */
@@ -103,7 +103,7 @@ static inline void ks8695_wdt_reload(void)
103static int ks8695_wdt_settimeout(int new_time) 103static int ks8695_wdt_settimeout(int new_time)
104{ 104{
105 /* 105 /*
106 * All counting occurs at SLOW_CLOCK / 128 = 0.256 Hz 106 * All counting occurs at KS8695_CLOCK_RATE / 128 = 0.256 Hz
107 * 107 *
108 * Since WDV is a 16-bit counter, the maximum period is 108 * Since WDV is a 16-bit counter, the maximum period is
109 * 65536 / 0.256 = 256 seconds. 109 * 65536 / 0.256 = 256 seconds.
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index ee1caae4d33b..016245419fad 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -38,7 +38,7 @@
38 38
39static unsigned long oscr_freq; 39static unsigned long oscr_freq;
40static unsigned long sa1100wdt_users; 40static unsigned long sa1100wdt_users;
41static int pre_margin; 41static unsigned int pre_margin;
42static int boot_status; 42static int boot_status;
43 43
44/* 44/*
@@ -84,6 +84,7 @@ static const struct watchdog_info ident = {
84 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT 84 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT
85 | WDIOF_KEEPALIVEPING, 85 | WDIOF_KEEPALIVEPING,
86 .identity = "SA1100/PXA255 Watchdog", 86 .identity = "SA1100/PXA255 Watchdog",
87 .firmware_version = 1,
87}; 88};
88 89
89static long sa1100dog_ioctl(struct file *file, unsigned int cmd, 90static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
@@ -118,7 +119,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
118 if (ret) 119 if (ret)
119 break; 120 break;
120 121
121 if (time <= 0 || time > 255) { 122 if (time <= 0 || (oscr_freq * (long long)time >= 0xffffffff)) {
122 ret = -EINVAL; 123 ret = -EINVAL;
123 break; 124 break;
124 } 125 }
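The sa1100 watchdog now rejects timeouts whose tick count would overflow the 32-bit OSCR match register, instead of using a hard-coded 255 second cap; the comparison is widened to 64 bits before the multiplication can wrap. In isolation, with the usual SA1100/PXA 3.6864 MHz rate used purely as an example value:

#include <stdio.h>

/* Returns 1 if 'time' seconds fit in a 32-bit tick counter at 'freq' Hz. */
static int timeout_ok(unsigned long freq, int time)
{
	if (time <= 0 || (freq * (long long)time >= 0xffffffff))
		return 0;
	return 1;
}

int main(void)
{
	unsigned long freq = 3686400;

	printf("60 s: %d, 2000 s: %d\n",
	       timeout_ok(freq, 60), timeout_ok(freq, 2000));
	return 0;
}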
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 916890abffdd..f201accc4e3d 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -89,6 +89,11 @@ static void w83627hf_select_wd_register(void)
89 c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */ 89 c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
90 outb_p(0x2b, WDT_EFER); 90 outb_p(0x2b, WDT_EFER);
91 outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */ 91 outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */
92 } else if (c == 0x88) { /* W83627EHF */
93 outb_p(0x2d, WDT_EFER); /* select GPIO5 */
94 c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
95 outb_p(0x2d, WDT_EFER);
96 outb_p(c, WDT_EFDR); /* set GPIO5 to WDT0 */
92 } 97 }
93 98
94 outb_p(0x07, WDT_EFER); /* point to logical device number reg */ 99 outb_p(0x07, WDT_EFER); /* point to logical device number reg */
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index 883b5f79673a..a6c12dec91a1 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -149,8 +149,10 @@ static void wdt_ctrl(int timeout)
149{ 149{
150 spin_lock(&io_lock); 150 spin_lock(&io_lock);
151 151
152 if (w83697ug_select_wd_register() < 0) 152 if (w83697ug_select_wd_register() < 0) {
153 spin_unlock(&io_lock);
153 return; 154 return;
155 }
154 156
155 outb_p(0xF4, WDT_EFER); /* Select CRF4 */ 157 outb_p(0xF4, WDT_EFER); /* Select CRF4 */
156 outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF4 */ 158 outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF4 */
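The w83697ug fix is the classic balanced-locking rule: every exit from a section entered with spin_lock() must release the lock, including early error returns. The shape of the corrected function, with a placeholder standing in for w83697ug_select_wd_register():

#include <linux/spinlock.h>

static int select_registers(void);	/* placeholder for the chip-select helper */

static void example_ctrl(spinlock_t *lock, int timeout)
{
	spin_lock(lock);

	if (select_registers() < 0) {
		spin_unlock(lock);	/* do not leave the lock held on failure */
		return;
	}

	/* ... program the timeout registers ... */

	spin_unlock(lock);
}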
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index a4fe7a38d9b0..3bde56bce63a 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -218,16 +218,14 @@ static void wdrtas_timer_keepalive(void)
218 */ 218 */
219static int wdrtas_get_temperature(void) 219static int wdrtas_get_temperature(void)
220{ 220{
221 long result; 221 int result;
222 int temperature = 0; 222 int temperature = 0;
223 223
224 result = rtas_call(wdrtas_token_get_sensor_state, 2, 2, 224 result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);
225 (void *)__pa(&temperature),
226 WDRTAS_THERMAL_SENSOR, 0);
227 225
228 if (result < 0) 226 if (result < 0)
229 printk(KERN_WARNING "wdrtas: reading the thermal sensor " 227 printk(KERN_WARNING "wdrtas: reading the thermal sensor "
230 "faild: %li\n", result); 228 "failed: %i\n", result);
231 else 229 else
232 temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */ 230 temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */
233 231
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 891d2e90753a..abad71b1632b 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -927,9 +927,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
927void __init xen_init_IRQ(void) 927void __init xen_init_IRQ(void)
928{ 928{
929 int i; 929 int i;
930 size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
931 930
932 cpu_evtchn_mask_p = alloc_bootmem(size); 931 cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
932 GFP_KERNEL);
933 BUG_ON(cpu_evtchn_mask_p == NULL); 933 BUG_ON(cpu_evtchn_mask_p == NULL);
934 934
935 init_evtchn_cpu_bindings(); 935 init_evtchn_cpu_bindings();
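The final hunk allocates the per-cpu event-channel mask array with kcalloc() rather than alloc_bootmem(), giving zeroed slab memory sized for one entry per possible CPU. Minimal shape of that allocation, with a placeholder struct in place of the Xen-internal cpu_evtchn_s:

#include <linux/slab.h>

struct example_evtchn_mask {
	unsigned long bits[64];		/* placeholder layout */
};

static struct example_evtchn_mask *alloc_masks(unsigned int nr_cpus)
{
	/* zeroed array with one mask per possible cpu; NULL on failure */
	return kcalloc(nr_cpus, sizeof(struct example_evtchn_mask), GFP_KERNEL);
}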